/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of GROMACS.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website!
 */
/* This is the innermost loop contents for the n vs n atom
 * SSE2 single precision kernels.
 */

/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
/* Without exclusions and energies we only need to mask the cut-off;
 * this can be faster with blendv (only available with SSE4.1 and later).
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define CUTOFF_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define CUTOFF_BLENDV
#endif
#endif
int cj,aj,ajx,ajy,ajz;

/* Energy group indices for two atoms packed into one int */
int egp_jj[UNROLLJ/2];

/* Interaction (non-exclusion) mask of all 1's or 0's */

gmx_mm_pr jxSSE,jySSE,jzSSE;
gmx_mm_pr dx_SSE0,dy_SSE0,dz_SSE0;
gmx_mm_pr dx_SSE1,dy_SSE1,dz_SSE1;
gmx_mm_pr dx_SSE2,dy_SSE2,dz_SSE2;
gmx_mm_pr dx_SSE3,dy_SSE3,dz_SSE3;
gmx_mm_pr tx_SSE0,ty_SSE0,tz_SSE0;
gmx_mm_pr tx_SSE1,ty_SSE1,tz_SSE1;
gmx_mm_pr tx_SSE2,ty_SSE2,tz_SSE2;
gmx_mm_pr tx_SSE3,ty_SSE3,tz_SSE3;
gmx_mm_pr rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
gmx_mm_pr rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
gmx_mm_pr rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
gmx_mm_pr rsq_SSE3,rinv_SSE3,rinvsq_SSE3;

/* wco: within cut-off, mask of all 1's or 0's */

#ifdef VDW_CUTOFF_CHECK
gmx_mm_pr wco_vdw_SSE0;
gmx_mm_pr wco_vdw_SSE1;
gmx_mm_pr wco_vdw_SSE2;
gmx_mm_pr wco_vdw_SSE3;

/* 1/r masked with the interaction mask */
gmx_mm_pr rinv_ex_SSE0;
gmx_mm_pr rinv_ex_SSE1;
gmx_mm_pr rinv_ex_SSE2;
gmx_mm_pr rinv_ex_SSE3;

/* The force (PME mesh force) we need to subtract from 1/r^2 */

#ifdef CALC_COUL_EWALD
gmx_mm_pr brsq_SSE0,brsq_SSE1,brsq_SSE2,brsq_SSE3;
gmx_mm_pr ewcorr_SSE0,ewcorr_SSE1,ewcorr_SSE2,ewcorr_SSE3;

/* frcoul = (1/r - fsub)*r */
gmx_mm_pr frcoul_SSE0;
gmx_mm_pr frcoul_SSE1;
gmx_mm_pr frcoul_SSE2;
gmx_mm_pr frcoul_SSE3;

/* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
gmx_mm_pr r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
gmx_mm_pr r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
gmx_mm_pr r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
gmx_mm_pr r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
/* Table index: rs converted to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
gmx_epi32 ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#else
__m128i ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#endif
/* Linear force table values */
gmx_mm_pr ctab0_SSE0,ctab1_SSE0;
gmx_mm_pr ctab0_SSE1,ctab1_SSE1;
gmx_mm_pr ctab0_SSE2,ctab1_SSE2;
gmx_mm_pr ctab0_SSE3,ctab1_SSE3;

/* Quadratic energy table value */
gmx_mm_pr ctabv_SSE0;
gmx_mm_pr ctabv_SSE1;
gmx_mm_pr ctabv_SSE2;
gmx_mm_pr ctabv_SSE3;

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
/* The potential (PME mesh) we need to subtract from 1/r */
gmx_mm_pr vc_sub_SSE0;
gmx_mm_pr vc_sub_SSE1;
gmx_mm_pr vc_sub_SSE2;
gmx_mm_pr vc_sub_SSE3;

/* Electrostatic potential */
gmx_mm_pr vcoul_SSE0;
gmx_mm_pr vcoul_SSE1;
gmx_mm_pr vcoul_SSE2;
gmx_mm_pr vcoul_SSE3;

/* The force times 1/r */
gmx_mm_pr fscal_SSE0;
gmx_mm_pr fscal_SSE1;
gmx_mm_pr fscal_SSE2;
gmx_mm_pr fscal_SSE3;

/* LJ sigma_j/2 and sqrt(epsilon_j) */
gmx_mm_pr hsig_j_SSE,seps_j_SSE;
/* LJ sigma_ij and epsilon_ij */
gmx_mm_pr sig_SSE0,eps_SSE0;
gmx_mm_pr sig_SSE1,eps_SSE1;
gmx_mm_pr sig_SSE2,eps_SSE2;
gmx_mm_pr sig_SSE3,eps_SSE3;
gmx_mm_pr sig2_SSE0,sig6_SSE0;
gmx_mm_pr sig2_SSE1,sig6_SSE1;
gmx_mm_pr sig2_SSE2,sig6_SSE2;
gmx_mm_pr sig2_SSE3,sig6_SSE3;
#endif /* LJ_COMB_LB */

gmx_mm_pr c6s_j_SSE,c12s_j_SSE;

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Index for loading LJ parameters, complicated when interleaving */

/* LJ C6 and C12 parameters, used with geometric comb. rule */
gmx_mm_pr c6_SSE0,c12_SSE0;
gmx_mm_pr c6_SSE1,c12_SSE1;
gmx_mm_pr c6_SSE2,c12_SSE2;
gmx_mm_pr c6_SSE3,c12_SSE3;

/* Intermediate variables for LJ calculation */
gmx_mm_pr rinvsix_SSE0;
gmx_mm_pr rinvsix_SSE1;
gmx_mm_pr rinvsix_SSE2;
gmx_mm_pr rinvsix_SSE3;

gmx_mm_pr sir_SSE0,sir2_SSE0,sir6_SSE0;
gmx_mm_pr sir_SSE1,sir2_SSE1,sir6_SSE1;
gmx_mm_pr sir_SSE2,sir2_SSE2,sir6_SSE2;
gmx_mm_pr sir_SSE3,sir2_SSE3,sir6_SSE3;

gmx_mm_pr FrLJ6_SSE0,FrLJ12_SSE0;
gmx_mm_pr FrLJ6_SSE1,FrLJ12_SSE1;
gmx_mm_pr FrLJ6_SSE2,FrLJ12_SSE2;
gmx_mm_pr FrLJ6_SSE3,FrLJ12_SSE3;

gmx_mm_pr VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
gmx_mm_pr VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
gmx_mm_pr VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
gmx_mm_pr VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;

/* j-cluster index */
/* Atom indices (of the first atom in the cluster) */
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#if UNROLLJ == STRIDE
ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;

#ifndef GMX_MM256_HERE
/* Load integer interaction mask */
__m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);
/* There is no not-equal SSE instruction, so we need a not here */
int_SSE0 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
int_SSE1 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
int_SSE2 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
int_SSE3 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
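/* Sketch of the mask logic (assuming a set bit in l_cj[cjind].excl means the
 * i,j pair is not excluded): andnot yields (~excl) & mask_i, which is zero
 * exactly when the bit selected by mask_i is set, so the compare against zero
 * produces an all-ones interaction mask for pairs that should interact.
 */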
/* Load integer interaction mask */
/* With AVX there are no 256-bit integer operations, so cast to real */
gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
/* We can't compare all 4*8=32 float bits: shift the mask */
gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
/* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
 * With gcc we don't need the cast, but it's faster.
 */
#define cast_cvt(x) _mm256_cvtepi32_ps(_mm256_castps_si256(x))
int_SSE0 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
int_SSE1 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
int_SSE2 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
int_SSE3 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
/* Load integer interaction mask */
/* With AVX there are no 256-bit integer operations,
 * and there is no int to double conversion, so cast to float.
 */
__m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
#define cast_cvt(x) _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
int_SSE0 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
int_SSE1 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
int_SSE2 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
int_SSE3 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
/* load j atom coordinates */
jxSSE = gmx_load_pr(x+ajx);
jySSE = gmx_load_pr(x+ajy);
jzSSE = gmx_load_pr(x+ajz);
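/* The three loads above presumably rely on cluster-packed coordinate storage
 * (STRIDE x values, then y, then z per j-cluster), so each load fills a SIMD
 * register with one coordinate component of all UNROLLJ j-atoms.
 */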
/* Calculate distance */
dx_SSE0 = gmx_sub_pr(ix_SSE0,jxSSE);
dy_SSE0 = gmx_sub_pr(iy_SSE0,jySSE);
dz_SSE0 = gmx_sub_pr(iz_SSE0,jzSSE);
dx_SSE1 = gmx_sub_pr(ix_SSE1,jxSSE);
dy_SSE1 = gmx_sub_pr(iy_SSE1,jySSE);
dz_SSE1 = gmx_sub_pr(iz_SSE1,jzSSE);
dx_SSE2 = gmx_sub_pr(ix_SSE2,jxSSE);
dy_SSE2 = gmx_sub_pr(iy_SSE2,jySSE);
dz_SSE2 = gmx_sub_pr(iz_SSE2,jzSSE);
dx_SSE3 = gmx_sub_pr(ix_SSE3,jxSSE);
dy_SSE3 = gmx_sub_pr(iy_SSE3,jySSE);
dz_SSE3 = gmx_sub_pr(iz_SSE3,jzSSE);

/* rsq = dx*dx+dy*dy+dz*dz */
rsq_SSE0 = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
rsq_SSE1 = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
rsq_SSE2 = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
rsq_SSE3 = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);

#ifndef CUTOFF_BLENDV
wco_SSE0 = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
wco_SSE1 = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
wco_SSE2 = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
wco_SSE3 = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);
/* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
wco_SSE0 = gmx_and_pr(wco_SSE0,diag_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,diag_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,diag_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,diag_SSE3);

#if UNROLLJ < UNROLLI
wco_SSE0 = gmx_and_pr(wco_SSE0,diag0_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,diag0_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,diag0_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,diag0_SSE3);

if (cj == ci_sh*2 + 1)
wco_SSE0 = gmx_and_pr(wco_SSE0,diag1_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,diag1_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,diag1_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,diag1_SSE3);

wco_SSE0 = gmx_and_pr(wco_SSE0,diag0_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,diag0_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,diag0_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,diag0_SSE3);

else if (cj*2 + 1 == ci_sh)
wco_SSE0 = gmx_and_pr(wco_SSE0,diag1_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,diag1_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,diag1_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,diag1_SSE3);
#else /* EXCL_FORCES */
/* Remove all excluded atom pairs from the list */
wco_SSE0 = gmx_and_pr(wco_SSE0,int_SSE0);
wco_SSE1 = gmx_and_pr(wco_SSE1,int_SSE1);
wco_SSE2 = gmx_and_pr(wco_SSE2,int_SSE2);
wco_SSE3 = gmx_and_pr(wco_SSE3,int_SSE3);

for(i=0; i<UNROLLI; i++)
gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
for(j=0; j<UNROLLJ; j++)

/* For excluded pairs add a small number to avoid r^-6 = NaN */
rsq_SSE0 = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
rsq_SSE1 = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
rsq_SSE2 = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
rsq_SSE3 = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
rinv_SSE0 = gmx_invsqrt_pr(rsq_SSE0);
rinv_SSE1 = gmx_invsqrt_pr(rsq_SSE1);
rinv_SSE2 = gmx_invsqrt_pr(rsq_SSE2);
rinv_SSE3 = gmx_invsqrt_pr(rsq_SSE3);

GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);

/* Load parameters for j atom */
jq_SSE = gmx_load_pr(q+aj);
qq_SSE0 = gmx_mul_pr(iq_SSE0,jq_SSE);
qq_SSE1 = gmx_mul_pr(iq_SSE1,jq_SSE);
qq_SSE2 = gmx_mul_pr(iq_SSE2,jq_SSE);
qq_SSE3 = gmx_mul_pr(iq_SSE3,jq_SSE);

#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
#endif /* not defined any LJ rule */

c6s_j_SSE = gmx_load_pr(ljc+aj2+0);
c12s_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);
c6_SSE0 = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
c6_SSE1 = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
c6_SSE2 = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
c6_SSE3 = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
c12_SSE0 = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
c12_SSE1 = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
c12_SSE2 = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
c12_SSE3 = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
#endif /* LJ_COMB_GEOM */

hsig_j_SSE = gmx_load_pr(ljc+aj2+0);
seps_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);
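/* Lorentz-Berthelot combination: sigma_ij = (sigma_i + sigma_j)/2 and
 * epsilon_ij = sqrt(epsilon_i*epsilon_j). Since ljc presumably stores
 * sigma/2 and sqrt(epsilon) per atom, a single add and a single multiply
 * per pair suffice below.
 */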
sig_SSE0 = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
sig_SSE1 = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
eps_SSE0 = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
eps_SSE1 = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
sig_SSE2 = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
sig_SSE3 = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
eps_SSE2 = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
eps_SSE3 = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
#endif /* LJ_COMB_LB */
#ifndef CUTOFF_BLENDV
rinv_SSE0 = gmx_and_pr(rinv_SSE0,wco_SSE0);
rinv_SSE1 = gmx_and_pr(rinv_SSE1,wco_SSE1);
rinv_SSE2 = gmx_and_pr(rinv_SSE2,wco_SSE2);
rinv_SSE3 = gmx_and_pr(rinv_SSE3,wco_SSE3);

/* We only need to mask for the cut-off: blendv is faster */
rinv_SSE0 = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
rinv_SSE1 = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
rinv_SSE2 = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
rinv_SSE3 = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
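/* blendv selects its second argument where the sign bit of the third is set:
 * rc2 - rsq is negative beyond the cut-off, so rinv is replaced by zero there,
 * which matches the cmp+and masking of the non-blendv path above.
 */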
rinvsq_SSE0 = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
rinvsq_SSE1 = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
rinvsq_SSE2 = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
rinvsq_SSE3 = gmx_mul_pr(rinv_SSE3,rinv_SSE3);

/* Note that here we calculate force*r, not the usual force/r.
 * This allows avoiding masking the reaction-field contribution,
 * as frcoul is later multiplied by rinvsq which has been
 * masked with the cut-off check.
 */
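/* Illustrative scalar form (assuming mrc_3_SSE = -2*k_rf, hrc_3_SSE = k_rf
 * and moh_rc_SSE = -c_rf, the usual reaction-field constants):
 *   frcoul = qq*(1/r - 2*k_rf*r^2)        which is force*r
 *   vcoul  = qq*(1/r + k_rf*r^2 - c_rf)
 * For excluded pairs rinv_ex is zero, so only the reaction-field part remains.
 */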
/* Only add 1/r for non-excluded atom pairs */
rinv_ex_SSE0 = gmx_and_pr(rinv_SSE0,int_SSE0);
rinv_ex_SSE1 = gmx_and_pr(rinv_SSE1,int_SSE1);
rinv_ex_SSE2 = gmx_and_pr(rinv_SSE2,int_SSE2);
rinv_ex_SSE3 = gmx_and_pr(rinv_SSE3,int_SSE3);

/* No exclusion forces, we always need 1/r */
#define rinv_ex_SSE0 rinv_SSE0
#define rinv_ex_SSE1 rinv_SSE1
#define rinv_ex_SSE2 rinv_SSE2
#define rinv_ex_SSE3 rinv_SSE3

/* Electrostatic interactions */
frcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
frcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
frcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
frcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));

vcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
vcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
vcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
vcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));

#ifdef CALC_COUL_EWALD
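/* Analytical Ewald sketch: the real-space force times r is
 *   qq*(erfc(beta*r)/r + 2*beta/sqrt(pi)*exp(-(beta*r)^2)),
 * computed below as qq*(1/r + ewcorr*(beta*r)^2), where gmx_pmecorrF is
 * presumably a polynomial approximation in (beta*r)^2 of the difference
 * between this and the plain 1/r force; gmx_pmecorrV does the same for
 * the potential.
 */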
/* We need to mask (or limit) rsq for the cut-off,
 * as large distances can cause an overflow in gmx_pmecorrF/V.
 */
#ifndef CUTOFF_BLENDV
brsq_SSE0 = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE0,wco_SSE0));
brsq_SSE1 = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE1,wco_SSE1));
brsq_SSE2 = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE2,wco_SSE2));
brsq_SSE3 = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE3,wco_SSE3));

/* Strangely, putting mul on a separate line is slower (icc 13) */
brsq_SSE0 = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0)));
brsq_SSE1 = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1)));
brsq_SSE2 = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2)));
brsq_SSE3 = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3)));

ewcorr_SSE0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0),beta_SSE);
ewcorr_SSE1 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1),beta_SSE);
ewcorr_SSE2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2),beta_SSE);
ewcorr_SSE3 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3),beta_SSE);
frcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(ewcorr_SSE0,brsq_SSE0)));
frcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(ewcorr_SSE1,brsq_SSE1)));
frcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(ewcorr_SSE2,brsq_SSE2)));
frcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(ewcorr_SSE3,brsq_SSE3)));

vc_sub_SSE0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0),beta_SSE);
vc_sub_SSE1 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1),beta_SSE);
vc_sub_SSE2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2),beta_SSE);
vc_sub_SSE3 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3),beta_SSE);

#endif /* CALC_COUL_EWALD */
/* Electrostatic interactions */
r_SSE0 = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
r_SSE1 = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
r_SSE2 = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
r_SSE3 = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
/* Convert r to scaled table units */
rs_SSE0 = gmx_mul_pr(r_SSE0,invtsp_SSE);
rs_SSE1 = gmx_mul_pr(r_SSE1,invtsp_SSE);
rs_SSE2 = gmx_mul_pr(r_SSE2,invtsp_SSE);
rs_SSE3 = gmx_mul_pr(r_SSE3,invtsp_SSE);
/* Truncate scaled r to an int */
ti_SSE0 = gmx_cvttpr_epi32(rs_SSE0);
ti_SSE1 = gmx_cvttpr_epi32(rs_SSE1);
ti_SSE2 = gmx_cvttpr_epi32(rs_SSE2);
ti_SSE3 = gmx_cvttpr_epi32(rs_SSE3);
#ifdef GMX_X86_SSE4_1
/* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
rf_SSE0 = gmx_floor_pr(rs_SSE0);
rf_SSE1 = gmx_floor_pr(rs_SSE1);
rf_SSE2 = gmx_floor_pr(rs_SSE2);
rf_SSE3 = gmx_floor_pr(rs_SSE3);

rf_SSE0 = gmx_cvtepi32_pr(ti_SSE0);
rf_SSE1 = gmx_cvtepi32_pr(ti_SSE1);
rf_SSE2 = gmx_cvtepi32_pr(ti_SSE2);
rf_SSE3 = gmx_cvtepi32_pr(ti_SSE3);

frac_SSE0 = gmx_sub_pr(rs_SSE0,rf_SSE0);
frac_SSE1 = gmx_sub_pr(rs_SSE1,rf_SSE1);
frac_SSE2 = gmx_sub_pr(rs_SSE2,rf_SSE2);
frac_SSE3 = gmx_sub_pr(rs_SSE3,rf_SSE3);

/* Load and interpolate table forces and possibly energies.
 * Force and energy can be combined in one table, stride 4: FDV0
 * or in two separate tables with stride 1: F and V.
 * Currently single precision uses FDV0, double F and V.
 */
#ifndef CALC_ENERGIES
load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);

load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);

load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
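/* Interpolation sketch (assuming invtsp_SSE = 1/spacing and
 * mhalfsp_SSE = -spacing/2): with rs = r/spacing and frac = rs - floor(rs),
 * the force correction below is linear in frac,
 *   fsub = ctab0 + frac*ctab1,
 * and the potential correction is the matching quadratic,
 *   vc_sub = ctabv - (spacing/2)*frac*(ctab0 + fsub),
 * i.e. the trapezoid rule applied to the interpolated force over the
 * fractional bin.
 */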
fsub_SSE0 = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
fsub_SSE1 = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
fsub_SSE2 = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
fsub_SSE3 = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
frcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
frcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
frcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
frcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));

vc_sub_SSE0 = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
vc_sub_SSE1 = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
vc_sub_SSE2 = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
vc_sub_SSE3 = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));

#endif /* CALC_COUL_TAB */

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
/* Add Ewald potential shift to vc_sub for convenience */
vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));

vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);

vcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
vcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
vcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
vcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));

/* Mask energy for cut-off and diagonal */
vcoul_SSE0 = gmx_and_pr(vcoul_SSE0,wco_SSE0);
vcoul_SSE1 = gmx_and_pr(vcoul_SSE1,wco_SSE1);
vcoul_SSE2 = gmx_and_pr(vcoul_SSE2,wco_SSE2);
vcoul_SSE3 = gmx_and_pr(vcoul_SSE3,wco_SSE3);

#endif /* CALC_COULOMB */
/* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
wco_vdw_SSE0 = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
wco_vdw_SSE1 = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
wco_vdw_SSE2 = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
wco_vdw_SSE3 = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);

/* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_SSE0 wco_SSE0
#define wco_vdw_SSE1 wco_SSE1
#define wco_vdw_SSE2 wco_SSE2
#define wco_vdw_SSE3 wco_SSE3
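/* LJ sketch: rinvsix = 1/r^6, FrLJ6 = c6/r^6 and FrLJ12 = c12/r^12, so that
 * (FrLJ12 - FrLJ6)*rinvsq is the LJ force over r. Judging from the 1/6 and
 * 1/12 factors in the energy expressions further down, c6 and c12 presumably
 * already include the factors 6 and 12 from the force derivative.
 */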
rinvsix_SSE0 = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
rinvsix_SSE1 = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0,int_SSE0);
rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1,int_SSE1);
rinvsix_SSE2 = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
rinvsix_SSE3 = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2,int_SSE2);
rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3,int_SSE3);

#ifdef VDW_CUTOFF_CHECK
rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);

FrLJ6_SSE0 = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
FrLJ6_SSE1 = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
FrLJ6_SSE2 = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
FrLJ6_SSE3 = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);

FrLJ12_SSE0 = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
FrLJ12_SSE1 = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
FrLJ12_SSE2 = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
FrLJ12_SSE3 = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
#endif /* not LJ_COMB_LB */
sir_SSE0 = gmx_mul_pr(sig_SSE0,rinv_SSE0);
sir_SSE1 = gmx_mul_pr(sig_SSE1,rinv_SSE1);
sir_SSE2 = gmx_mul_pr(sig_SSE2,rinv_SSE2);
sir_SSE3 = gmx_mul_pr(sig_SSE3,rinv_SSE3);

sir2_SSE0 = gmx_mul_pr(sir_SSE0,sir_SSE0);
sir2_SSE1 = gmx_mul_pr(sir_SSE1,sir_SSE1);
sir2_SSE2 = gmx_mul_pr(sir_SSE2,sir_SSE2);
sir2_SSE3 = gmx_mul_pr(sir_SSE3,sir_SSE3);

sir6_SSE0 = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
sir6_SSE1 = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
sir6_SSE0 = gmx_and_pr(sir6_SSE0,int_SSE0);
sir6_SSE1 = gmx_and_pr(sir6_SSE1,int_SSE1);
sir6_SSE2 = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
sir6_SSE3 = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
sir6_SSE2 = gmx_and_pr(sir6_SSE2,int_SSE2);
sir6_SSE3 = gmx_and_pr(sir6_SSE3,int_SSE3);

#ifdef VDW_CUTOFF_CHECK
sir6_SSE0 = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
sir6_SSE1 = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
sir6_SSE2 = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
sir6_SSE3 = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);

FrLJ6_SSE0 = gmx_mul_pr(eps_SSE0,sir6_SSE0);
FrLJ6_SSE1 = gmx_mul_pr(eps_SSE1,sir6_SSE1);
FrLJ6_SSE2 = gmx_mul_pr(eps_SSE2,sir6_SSE2);
FrLJ6_SSE3 = gmx_mul_pr(eps_SSE3,sir6_SSE3);

FrLJ12_SSE0 = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
FrLJ12_SSE1 = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
FrLJ12_SSE2 = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
FrLJ12_SSE3 = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);

#if defined CALC_ENERGIES
/* We need C6 and C12 to calculate the LJ potential shift */
sig2_SSE0 = gmx_mul_pr(sig_SSE0,sig_SSE0);
sig2_SSE1 = gmx_mul_pr(sig_SSE1,sig_SSE1);
sig2_SSE2 = gmx_mul_pr(sig_SSE2,sig_SSE2);
sig2_SSE3 = gmx_mul_pr(sig_SSE3,sig_SSE3);

sig6_SSE0 = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
sig6_SSE1 = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
sig6_SSE2 = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
sig6_SSE3 = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));

c6_SSE0 = gmx_mul_pr(eps_SSE0,sig6_SSE0);
c6_SSE1 = gmx_mul_pr(eps_SSE1,sig6_SSE1);
c6_SSE2 = gmx_mul_pr(eps_SSE2,sig6_SSE2);
c6_SSE3 = gmx_mul_pr(eps_SSE3,sig6_SSE3);

c12_SSE0 = gmx_mul_pr(c6_SSE0,sig6_SSE0);
c12_SSE1 = gmx_mul_pr(c6_SSE1,sig6_SSE1);
c12_SSE2 = gmx_mul_pr(c6_SSE2,sig6_SSE2);
c12_SSE3 = gmx_mul_pr(c6_SSE3,sig6_SSE3);

#endif /* LJ_COMB_LB */
/* Extract the group pair index per j pair.
 * Energy groups are stored per i-cluster, so things get
 * complicated when the i- and j-cluster sizes don't match.
 */
egps_j = nbat->energrp[cj>>1];
egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;

/* We assume UNROLLI <= UNROLLJ */
for(jdi=0; jdi<UNROLLJ/UNROLLI; jdi++)
egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
for(jj=0; jj<(UNROLLI/2); jj++)
egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
#ifndef ENERGY_GROUPS
vctotSSE = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));

add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);

/* Calculate the LJ energies */
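/* Shifted LJ potential sketch (assuming sh_invrc6_SSE = 1/rc^6 and
 * sh_invrc12_SSE = 1/rc^12):
 *   VLJ = (FrLJ12 - c12/rc^12)/12 - (FrLJ6 - c6/rc^6)/6
 * which is zero at the cut-off; the masking below removes the shift again
 * for pairs beyond the cut-off and for excluded pairs.
 */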
VLJ6_SSE0 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
VLJ6_SSE1 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
VLJ6_SSE2 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
VLJ6_SSE3 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));

VLJ12_SSE0 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
VLJ12_SSE1 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
VLJ12_SSE2 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
VLJ12_SSE3 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));

VLJ_SSE0 = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
VLJ_SSE1 = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
VLJ_SSE2 = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
VLJ_SSE3 = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);

/* The potential shift should be removed for pairs beyond cut-off */
VLJ_SSE0 = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
VLJ_SSE1 = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
VLJ_SSE2 = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
VLJ_SSE3 = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);

/* The potential shift should be removed for excluded pairs */
VLJ_SSE0 = gmx_and_pr(VLJ_SSE0,int_SSE0);
VLJ_SSE1 = gmx_and_pr(VLJ_SSE1,int_SSE1);
VLJ_SSE2 = gmx_and_pr(VLJ_SSE2,int_SSE2);
VLJ_SSE3 = gmx_and_pr(VLJ_SSE3,int_SSE3);
#ifndef ENERGY_GROUPS
VvdwtotSSE = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                        gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
#else
                        gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
#endif
                        );
add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);

#endif /* CALC_ENERGIES */
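/* Total force: frcoul and FrLJ12/FrLJ6 all hold force*r, so multiplying their
 * sum by rinvsq gives fscal = F/r, and fscal*dx etc. below are the Cartesian
 * force components.
 */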
fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,
                        gmx_add_pr(frcoul_SSE0,
                        gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1,
                        gmx_add_pr(frcoul_SSE1,
                        gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));

fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);

#if defined CALC_LJ && !defined HALF_LJ
fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,
                        gmx_add_pr(frcoul_SSE2,
                        gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3,
                        gmx_add_pr(frcoul_SSE3,
                        gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
/* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
/* Calculate temporary vectorial force */
tx_SSE0 = gmx_mul_pr(fscal_SSE0,dx_SSE0);
tx_SSE1 = gmx_mul_pr(fscal_SSE1,dx_SSE1);
tx_SSE2 = gmx_mul_pr(fscal_SSE2,dx_SSE2);
tx_SSE3 = gmx_mul_pr(fscal_SSE3,dx_SSE3);
ty_SSE0 = gmx_mul_pr(fscal_SSE0,dy_SSE0);
ty_SSE1 = gmx_mul_pr(fscal_SSE1,dy_SSE1);
ty_SSE2 = gmx_mul_pr(fscal_SSE2,dy_SSE2);
ty_SSE3 = gmx_mul_pr(fscal_SSE3,dy_SSE3);
tz_SSE0 = gmx_mul_pr(fscal_SSE0,dz_SSE0);
tz_SSE1 = gmx_mul_pr(fscal_SSE1,dz_SSE1);
tz_SSE2 = gmx_mul_pr(fscal_SSE2,dz_SSE2);
tz_SSE3 = gmx_mul_pr(fscal_SSE3,dz_SSE3);

/* Increment i atom force */
fix_SSE0 = gmx_add_pr(fix_SSE0,tx_SSE0);
fix_SSE1 = gmx_add_pr(fix_SSE1,tx_SSE1);
fix_SSE2 = gmx_add_pr(fix_SSE2,tx_SSE2);
fix_SSE3 = gmx_add_pr(fix_SSE3,tx_SSE3);
fiy_SSE0 = gmx_add_pr(fiy_SSE0,ty_SSE0);
fiy_SSE1 = gmx_add_pr(fiy_SSE1,ty_SSE1);
fiy_SSE2 = gmx_add_pr(fiy_SSE2,ty_SSE2);
fiy_SSE3 = gmx_add_pr(fiy_SSE3,ty_SSE3);
fiz_SSE0 = gmx_add_pr(fiz_SSE0,tz_SSE0);
fiz_SSE1 = gmx_add_pr(fiz_SSE1,tz_SSE1);
fiz_SSE2 = gmx_add_pr(fiz_SSE2,tz_SSE2);
fiz_SSE3 = gmx_add_pr(fiz_SSE3,tz_SSE3);
/* Decrement j atom force */
gmx_store_pr(f+ajx,
             gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
gmx_store_pr(f+ajy,
             gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
gmx_store_pr(f+ajz,
             gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
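/* Newton's third law: the per-i-row contributions are summed per j atom with
 * gmx_sum4_pr and subtracted from the j forces in f, matching the additions
 * to the i-force accumulators above.
 */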
#undef CUTOFF_BLENDV