/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/* This is the innermost loop contents for the n vs n atom
 * x86 SIMD kernels, in single and double precision.
 */


/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
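
/* For reaction-field and Ewald the correction term also acts between
 * excluded pairs, so such pairs cannot simply be skipped here; with
 * EXCL_FORCES only the plain 1/r part is masked out for excluded pairs
 * (see rinv_ex_SSE* below).
 */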

/* Without exclusions and energies we only need to mask the cut-off;
 * this can be faster with blendv (only available with SSE4.1 and later).
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define CUTOFF_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 * Tested with icc 13.
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define CUTOFF_BLENDV
#endif
#endif
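
/* With CUTOFF_BLENDV the cut-off masking below is done by computing
 * rc2-rsq and letting blendv select on its sign bit, instead of a
 * compare followed by an and; the comments above list the CPU/compiler
 * combinations where this is actually faster.
 */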

        {
            int        cj,aj,ajx,ajy,ajz;

#ifdef ENERGY_GROUPS
            /* Energy group indices for two atoms packed into one int */
            int        egp_jj[UNROLLJ/2];
#endif

#ifdef CHECK_EXCLS
            /* Interaction (non-exclusion) mask of all 1's or 0's */
            gmx_mm_pr  int_SSE0;
            gmx_mm_pr  int_SSE1;
            gmx_mm_pr  int_SSE2;
            gmx_mm_pr  int_SSE3;
#endif

            gmx_mm_pr  jxSSE,jySSE,jzSSE;
            gmx_mm_pr  dx_SSE0,dy_SSE0,dz_SSE0;
            gmx_mm_pr  dx_SSE1,dy_SSE1,dz_SSE1;
            gmx_mm_pr  dx_SSE2,dy_SSE2,dz_SSE2;
            gmx_mm_pr  dx_SSE3,dy_SSE3,dz_SSE3;
            gmx_mm_pr  tx_SSE0,ty_SSE0,tz_SSE0;
            gmx_mm_pr  tx_SSE1,ty_SSE1,tz_SSE1;
            gmx_mm_pr  tx_SSE2,ty_SSE2,tz_SSE2;
            gmx_mm_pr  tx_SSE3,ty_SSE3,tz_SSE3;
            gmx_mm_pr  rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
            gmx_mm_pr  rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
            gmx_mm_pr  rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
            gmx_mm_pr  rsq_SSE3,rinv_SSE3,rinvsq_SSE3;
#ifndef CUTOFF_BLENDV
            /* wco: within cut-off, mask of all 1's or 0's */
            gmx_mm_pr  wco_SSE0;
            gmx_mm_pr  wco_SSE1;
            gmx_mm_pr  wco_SSE2;
            gmx_mm_pr  wco_SSE3;
#endif
#ifdef VDW_CUTOFF_CHECK
            gmx_mm_pr  wco_vdw_SSE0;
            gmx_mm_pr  wco_vdw_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  wco_vdw_SSE2;
            gmx_mm_pr  wco_vdw_SSE3;
#endif
#endif
#ifdef CALC_COULOMB
#ifdef CHECK_EXCLS
            /* 1/r masked with the interaction mask */
            gmx_mm_pr  rinv_ex_SSE0;
            gmx_mm_pr  rinv_ex_SSE1;
            gmx_mm_pr  rinv_ex_SSE2;
            gmx_mm_pr  rinv_ex_SSE3;
#endif
            gmx_mm_pr  jq_SSE;
            gmx_mm_pr  qq_SSE0;
            gmx_mm_pr  qq_SSE1;
            gmx_mm_pr  qq_SSE2;
            gmx_mm_pr  qq_SSE3;
#ifdef CALC_COUL_TAB
            /* The force (PME mesh force) we need to subtract from 1/r^2 */
            gmx_mm_pr  fsub_SSE0;
            gmx_mm_pr  fsub_SSE1;
            gmx_mm_pr  fsub_SSE2;
            gmx_mm_pr  fsub_SSE3;
#endif
#ifdef CALC_COUL_EWALD
            gmx_mm_pr  brsq_SSE0,brsq_SSE1,brsq_SSE2,brsq_SSE3;
            gmx_mm_pr  ewcorr_SSE0,ewcorr_SSE1,ewcorr_SSE2,ewcorr_SSE3;
#endif

            /* frcoul = (1/r - fsub)*r */
            gmx_mm_pr  frcoul_SSE0;
            gmx_mm_pr  frcoul_SSE1;
            gmx_mm_pr  frcoul_SSE2;
            gmx_mm_pr  frcoul_SSE3;
#ifdef CALC_COUL_TAB
            /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
            gmx_mm_pr  r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
            gmx_mm_pr  r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
            gmx_mm_pr  r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
            gmx_mm_pr  r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
            /* Table index: rs converted to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
            gmx_epi32  ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#else
            __m128i    ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#endif
            /* Linear force table values */
            gmx_mm_pr  ctab0_SSE0,ctab1_SSE0;
            gmx_mm_pr  ctab0_SSE1,ctab1_SSE1;
            gmx_mm_pr  ctab0_SSE2,ctab1_SSE2;
            gmx_mm_pr  ctab0_SSE3,ctab1_SSE3;
#ifdef CALC_ENERGIES
            /* Quadratic energy table value */
            gmx_mm_pr  ctabv_SSE0;
            gmx_mm_pr  ctabv_SSE1;
            gmx_mm_pr  ctabv_SSE2;
            gmx_mm_pr  ctabv_SSE3;
#endif
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
            /* The potential (PME mesh) we need to subtract from 1/r */
            gmx_mm_pr  vc_sub_SSE0;
            gmx_mm_pr  vc_sub_SSE1;
            gmx_mm_pr  vc_sub_SSE2;
            gmx_mm_pr  vc_sub_SSE3;
#endif
#ifdef CALC_ENERGIES
            /* Electrostatic potential */
            gmx_mm_pr  vcoul_SSE0;
            gmx_mm_pr  vcoul_SSE1;
            gmx_mm_pr  vcoul_SSE2;
            gmx_mm_pr  vcoul_SSE3;
#endif
#endif
            /* The force times 1/r */
            gmx_mm_pr  fscal_SSE0;
            gmx_mm_pr  fscal_SSE1;
            gmx_mm_pr  fscal_SSE2;
            gmx_mm_pr  fscal_SSE3;

#ifdef CALC_LJ
#ifdef LJ_COMB_LB
            /* LJ sigma_j/2 and sqrt(epsilon_j) */
            gmx_mm_pr  hsig_j_SSE,seps_j_SSE;
            /* LJ sigma_ij and epsilon_ij */
            gmx_mm_pr  sig_SSE0,eps_SSE0;
            gmx_mm_pr  sig_SSE1,eps_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  sig_SSE2,eps_SSE2;
            gmx_mm_pr  sig_SSE3,eps_SSE3;
#endif
#ifdef CALC_ENERGIES
            gmx_mm_pr  sig2_SSE0,sig6_SSE0;
            gmx_mm_pr  sig2_SSE1,sig6_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  sig2_SSE2,sig6_SSE2;
            gmx_mm_pr  sig2_SSE3,sig6_SSE3;
#endif
#endif /* CALC_ENERGIES */
#endif /* LJ_COMB_LB */

#ifdef LJ_COMB_GEOM
            gmx_mm_pr  c6s_j_SSE,c12s_j_SSE;
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
            /* Index for loading LJ parameters, complicated when interleaving */
            int         aj2;
#endif

#ifndef FIX_LJ_C
            /* LJ C6 and C12 parameters, used with geometric comb. rule */
            gmx_mm_pr  c6_SSE0,c12_SSE0;
            gmx_mm_pr  c6_SSE1,c12_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  c6_SSE2,c12_SSE2;
            gmx_mm_pr  c6_SSE3,c12_SSE3;
#endif
#endif

            /* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
            gmx_mm_pr  rinvsix_SSE0;
            gmx_mm_pr  rinvsix_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  rinvsix_SSE2;
            gmx_mm_pr  rinvsix_SSE3;
#endif
#endif
#ifdef LJ_COMB_LB
            gmx_mm_pr  sir_SSE0,sir2_SSE0,sir6_SSE0;
            gmx_mm_pr  sir_SSE1,sir2_SSE1,sir6_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  sir_SSE2,sir2_SSE2,sir6_SSE2;
            gmx_mm_pr  sir_SSE3,sir2_SSE3,sir6_SSE3;
#endif
#endif

            gmx_mm_pr  FrLJ6_SSE0,FrLJ12_SSE0;
            gmx_mm_pr  FrLJ6_SSE1,FrLJ12_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  FrLJ6_SSE2,FrLJ12_SSE2;
            gmx_mm_pr  FrLJ6_SSE3,FrLJ12_SSE3;
#endif
#ifdef CALC_ENERGIES
            gmx_mm_pr  VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
            gmx_mm_pr  VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
#ifndef HALF_LJ
            gmx_mm_pr  VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
            gmx_mm_pr  VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;
#endif
#endif
#endif /* CALC_LJ */

            /* j-cluster index */
            cj            = l_cj[cjind].cj;

            /* Atom indices (of the first atom in the cluster) */
            aj            = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
            aj2           = aj*2;
#else
            aj2           = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
            ajx           = aj*DIM;
#else
            ajx           = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
            ajy           = ajx + STRIDE;
            ajz           = ajy + STRIDE;
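
            /* The j coordinates are stored as packed blocks of STRIDE
             * x-values, then y, then z; when UNROLLJ != STRIDE two j-clusters
             * share one such block, hence the cj>>1 indexing above.
             * ajx/ajy/ajz now index the x, y and z block of cluster cj.
             */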

#ifdef CHECK_EXCLS
#ifndef GMX_MM256_HERE
            {
                /* Load integer interaction mask */
                __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);

                /* There is no not-equal SSE instruction, so we need a not here */
                int_SSE0  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
                int_SSE1  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
                int_SSE2  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
                int_SSE3  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
            }
#else
            {
#ifndef GMX_DOUBLE
                /* Load integer interaction mask */
                /* With AVX there are no integer operations, so cast to real */
                gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
                /* We can't compare all 4*8=32 float bits: shift the mask */
                gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
                /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
                 * With gcc we don't need the cast, but it's faster.
                 */
#define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
                int_SSE0  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
                int_SSE1  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
                int_SSE2  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
                int_SSE3  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
#undef cast_cvt
#else
                /* Load integer interaction mask */
                /* With AVX there are no integer operations,
                 * and there is no int to double conversion, so cast to float
                 */
                __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
#define cast_cvt(x)  _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
                int_SSE0  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
                int_SSE1  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
                int_SSE2  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
                int_SSE3  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
#undef cast_cvt
#endif
            }
#endif
#endif
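
            /* In both paths above int_SSE0..3 end up as full all-1s/all-0s
             * lane masks: all 1s where the interaction bit in l_cj[cjind].excl
             * is set, all 0s for excluded pairs. In the AVX path the int->float
             * conversion yields a non-zero float exactly for a non-zero masked
             * bit, which the != 0 compare then expands to a full mask.
             */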
            /* load j atom coordinates */
            jxSSE         = gmx_load_pr(x+ajx);
            jySSE         = gmx_load_pr(x+ajy);
            jzSSE         = gmx_load_pr(x+ajz);

            /* Calculate distance */
            dx_SSE0       = gmx_sub_pr(ix_SSE0,jxSSE);
            dy_SSE0       = gmx_sub_pr(iy_SSE0,jySSE);
            dz_SSE0       = gmx_sub_pr(iz_SSE0,jzSSE);
            dx_SSE1       = gmx_sub_pr(ix_SSE1,jxSSE);
            dy_SSE1       = gmx_sub_pr(iy_SSE1,jySSE);
            dz_SSE1       = gmx_sub_pr(iz_SSE1,jzSSE);
            dx_SSE2       = gmx_sub_pr(ix_SSE2,jxSSE);
            dy_SSE2       = gmx_sub_pr(iy_SSE2,jySSE);
            dz_SSE2       = gmx_sub_pr(iz_SSE2,jzSSE);
            dx_SSE3       = gmx_sub_pr(ix_SSE3,jxSSE);
            dy_SSE3       = gmx_sub_pr(iy_SSE3,jySSE);
            dz_SSE3       = gmx_sub_pr(iz_SSE3,jzSSE);

            /* rsq = dx*dx+dy*dy+dz*dz */
            rsq_SSE0      = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
            rsq_SSE1      = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
            rsq_SSE2      = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
            rsq_SSE3      = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);

#ifndef CUTOFF_BLENDV
            wco_SSE0      = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
            wco_SSE1      = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
            wco_SSE2      = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
            wco_SSE3      = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);
#endif

#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
            /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
            if (cj == ci_sh)
            {
                wco_SSE0  = gmx_and_pr(wco_SSE0,diag_SSE0);
                wco_SSE1  = gmx_and_pr(wco_SSE1,diag_SSE1);
                wco_SSE2  = gmx_and_pr(wco_SSE2,diag_SSE2);
                wco_SSE3  = gmx_and_pr(wco_SSE3,diag_SSE3);
            }
#else
#if UNROLLJ < UNROLLI
            if (cj == ci_sh*2)
            {
                wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
                wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
                wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
                wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
            }
            if (cj == ci_sh*2 + 1)
            {
                wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
                wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
                wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
                wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
            }
#else
            if (cj*2 == ci_sh)
            {
                wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
                wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
                wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
                wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
            }
            else if (cj*2 + 1 == ci_sh)
            {
                wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
                wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
                wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
                wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
            }
#endif
#endif
#else /* EXCL_FORCES */
            /* Remove all excluded atom pairs from the list */
            wco_SSE0      = gmx_and_pr(wco_SSE0,int_SSE0);
            wco_SSE1      = gmx_and_pr(wco_SSE1,int_SSE1);
            wco_SSE2      = gmx_and_pr(wco_SSE2,int_SSE2);
            wco_SSE3      = gmx_and_pr(wco_SSE3,int_SSE3);
#endif
#endif

#ifdef COUNT_PAIRS
            {
                int i,j;
                real tmp[UNROLLJ];
                for(i=0; i<UNROLLI; i++)
                {
                    gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
                    for(j=0; j<UNROLLJ; j++)
                    {
                        if (!(tmp[j] == 0))
                        {
                            npair++;
                        }
                    }
                }
            }
#endif

#ifdef CHECK_EXCLS
            /* For excluded pairs add a small number to avoid r^-6 = NaN */
            rsq_SSE0      = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
            rsq_SSE1      = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
            rsq_SSE2      = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
            rsq_SSE3      = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
#endif
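
            /* gmx_andnot_pr selects avoid_sing_SSE only where the interaction
             * mask is zero, i.e. only excluded pairs (which can be at or very
             * close to zero distance) get the offset, so the 1/sqrt below
             * stays finite without affecting interacting pairs.
             */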

            /* Calculate 1/r */
#ifndef GMX_DOUBLE
            rinv_SSE0     = gmx_invsqrt_pr(rsq_SSE0);
            rinv_SSE1     = gmx_invsqrt_pr(rsq_SSE1);
            rinv_SSE2     = gmx_invsqrt_pr(rsq_SSE2);
            rinv_SSE3     = gmx_invsqrt_pr(rsq_SSE3);
#else
            GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
            GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);
#endif

#ifdef CALC_COULOMB
            /* Load parameters for j atom */
            jq_SSE        = gmx_load_pr(q+aj);
            qq_SSE0       = gmx_mul_pr(iq_SSE0,jq_SSE);
            qq_SSE1       = gmx_mul_pr(iq_SSE1,jq_SSE);
            qq_SSE2       = gmx_mul_pr(iq_SSE2,jq_SSE);
            qq_SSE3       = gmx_mul_pr(iq_SSE3,jq_SSE);
#endif

#ifdef CALC_LJ

#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
            load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
            load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
#ifndef HALF_LJ
            load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
            load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
#endif
#endif /* not defined any LJ rule */

#ifdef LJ_COMB_GEOM
            c6s_j_SSE     = gmx_load_pr(ljc+aj2+0);
            c12s_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
            c6_SSE0       = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
            c6_SSE1       = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
#ifndef HALF_LJ
            c6_SSE2       = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
            c6_SSE3       = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
#endif
            c12_SSE0      = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
            c12_SSE1      = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
#ifndef HALF_LJ
            c12_SSE2      = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
            c12_SSE3      = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
#endif
#endif /* LJ_COMB_GEOM */

#ifdef LJ_COMB_LB
            hsig_j_SSE    = gmx_load_pr(ljc+aj2+0);
            seps_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);

            sig_SSE0      = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
            sig_SSE1      = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
            eps_SSE0      = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
            eps_SSE1      = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
#ifndef HALF_LJ
            sig_SSE2      = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
            sig_SSE3      = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
            eps_SSE2      = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
            eps_SSE3      = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
#endif
#endif /* LJ_COMB_LB */

#endif /* CALC_LJ */

#ifndef CUTOFF_BLENDV
            rinv_SSE0     = gmx_and_pr(rinv_SSE0,wco_SSE0);
            rinv_SSE1     = gmx_and_pr(rinv_SSE1,wco_SSE1);
            rinv_SSE2     = gmx_and_pr(rinv_SSE2,wco_SSE2);
            rinv_SSE3     = gmx_and_pr(rinv_SSE3,wco_SSE3);
#else
            /* We only need to mask for the cut-off: blendv is faster */
            rinv_SSE0     = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
            rinv_SSE1     = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
            rinv_SSE2     = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
            rinv_SSE3     = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
#endif

            rinvsq_SSE0   = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
            rinvsq_SSE1   = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
            rinvsq_SSE2   = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
            rinvsq_SSE3   = gmx_mul_pr(rinv_SSE3,rinv_SSE3);

#ifdef CALC_COULOMB
            /* Note that here we calculate force*r, not the usual force/r.
             * This allows avoiding masking the reaction-field contribution,
             * as frcoul is later multiplied by rinvsq which has been
             * masked with the cut-off check.
             */

#ifdef EXCL_FORCES
            /* Only add 1/r for non-excluded atom pairs */
            rinv_ex_SSE0  = gmx_and_pr(rinv_SSE0,int_SSE0);
            rinv_ex_SSE1  = gmx_and_pr(rinv_SSE1,int_SSE1);
            rinv_ex_SSE2  = gmx_and_pr(rinv_SSE2,int_SSE2);
            rinv_ex_SSE3  = gmx_and_pr(rinv_SSE3,int_SSE3);
#else
            /* No exclusion forces, we always need 1/r */
#define     rinv_ex_SSE0    rinv_SSE0
#define     rinv_ex_SSE1    rinv_SSE1
#define     rinv_ex_SSE2    rinv_SSE2
#define     rinv_ex_SSE3    rinv_SSE3
#endif

#ifdef CALC_COUL_RF
            /* Electrostatic interactions */
            frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
            frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
            frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
            frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));

#ifdef CALC_ENERGIES
            vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
            vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
            vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
            vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));
#endif
#endif
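
            /* In scalar form the reaction-field expressions above are
             *   F*r = qi*qj*(1/r + mrc_3*r^2)
             *   V   = qi*qj*(1/r + hrc_3*r^2 + moh_rc)
             * with mrc_3, hrc_3 and moh_rc the reaction-field coefficients
             * broadcast in the outer kernel file.
             */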

#ifdef CALC_COUL_EWALD
            /* We need to mask (or limit) rsq for the cut-off,
             * as large distances can cause an overflow in gmx_pmecorrF/V.
             */
#ifndef CUTOFF_BLENDV
            brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE0,wco_SSE0));
            brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE1,wco_SSE1));
            brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE2,wco_SSE2));
            brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE3,wco_SSE3));
#else
            /* Strangely, putting mul on a separate line is slower (icc 13) */
            brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0)));
            brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1)));
            brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2)));
            brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3)));
#endif
            ewcorr_SSE0   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0),beta_SSE);
            ewcorr_SSE1   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1),beta_SSE);
            ewcorr_SSE2   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2),beta_SSE);
            ewcorr_SSE3   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3),beta_SSE);
            frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(ewcorr_SSE0,brsq_SSE0)));
            frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(ewcorr_SSE1,brsq_SSE1)));
            frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(ewcorr_SSE2,brsq_SSE2)));
            frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(ewcorr_SSE3,brsq_SSE3)));

#ifdef CALC_ENERGIES
            vc_sub_SSE0   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0),beta_SSE);
            vc_sub_SSE1   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1),beta_SSE);
            vc_sub_SSE2   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2),beta_SSE);
            vc_sub_SSE3   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3),beta_SSE);
#endif

#endif /* CALC_COUL_EWALD */
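
            /* The analytical Ewald path evaluates the real-space correction
             * directly: with brsq = beta^2*r^2,
             *   F*r = qi*qj*(1/r + beta*gmx_pmecorrF(brsq)*brsq)
             * and for energies vc_sub = beta*gmx_pmecorrV(brsq) is the mesh
             * potential subtracted from 1/r further down, so no table
             * lookups are needed.
             */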

#ifdef CALC_COUL_TAB
            /* Electrostatic interactions */
            r_SSE0        = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
            r_SSE1        = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
            r_SSE2        = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
            r_SSE3        = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
            /* Convert r to scaled table units */
            rs_SSE0       = gmx_mul_pr(r_SSE0,invtsp_SSE);
            rs_SSE1       = gmx_mul_pr(r_SSE1,invtsp_SSE);
            rs_SSE2       = gmx_mul_pr(r_SSE2,invtsp_SSE);
            rs_SSE3       = gmx_mul_pr(r_SSE3,invtsp_SSE);
            /* Truncate scaled r to an int */
            ti_SSE0       = gmx_cvttpr_epi32(rs_SSE0);
            ti_SSE1       = gmx_cvttpr_epi32(rs_SSE1);
            ti_SSE2       = gmx_cvttpr_epi32(rs_SSE2);
            ti_SSE3       = gmx_cvttpr_epi32(rs_SSE3);
#ifdef GMX_X86_SSE4_1
            /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
            rf_SSE0       = gmx_floor_pr(rs_SSE0);
            rf_SSE1       = gmx_floor_pr(rs_SSE1);
            rf_SSE2       = gmx_floor_pr(rs_SSE2);
            rf_SSE3       = gmx_floor_pr(rs_SSE3);
#else
            rf_SSE0       = gmx_cvtepi32_pr(ti_SSE0);
            rf_SSE1       = gmx_cvtepi32_pr(ti_SSE1);
            rf_SSE2       = gmx_cvtepi32_pr(ti_SSE2);
            rf_SSE3       = gmx_cvtepi32_pr(ti_SSE3);
#endif
            frac_SSE0     = gmx_sub_pr(rs_SSE0,rf_SSE0);
            frac_SSE1     = gmx_sub_pr(rs_SSE1,rf_SSE1);
            frac_SSE2     = gmx_sub_pr(rs_SSE2,rf_SSE2);
            frac_SSE3     = gmx_sub_pr(rs_SSE3,rf_SSE3);

            /* Load and interpolate table forces and possibly energies.
             * Force and energy can be combined in one table, stride 4: FDV0
             * or in two separate tables with stride 1: F and V
             * Currently single precision uses FDV0, double F and V.
             */
#ifndef CALC_ENERGIES
            load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
            load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
            load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
            load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);
#else
#ifdef TAB_FDV0
            load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
            load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
            load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
            load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#else
            load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
            load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
            load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
            load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#endif
#endif
            fsub_SSE0     = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
            fsub_SSE1     = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
            fsub_SSE2     = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
            fsub_SSE3     = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
            frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
            frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
            frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
            frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));

#ifdef CALC_ENERGIES
            vc_sub_SSE0   = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
            vc_sub_SSE1   = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
            vc_sub_SSE2   = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
            vc_sub_SSE3   = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));
#endif
#endif /* CALC_COUL_TAB */
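
            /* fsub = ctab0 + frac*ctab1 linearly interpolates the tabulated
             * (PME mesh) force that is subtracted from 1/r^2, so above
             * F*r = qi*qj*(1/r - fsub*r). For energies the integral of this
             * linear force gives the quadratic vc_sub expression with the
             * mhalfsp_SSE (minus half the table spacing) prefactor.
             */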

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
            /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
            vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
            vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
            vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
            vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));
#else
            vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
            vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
            vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
            vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);
#endif
#endif

            vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
            vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
            vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
            vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));

#endif

#ifdef CALC_ENERGIES
            /* Mask energy for cut-off and diagonal */
            vcoul_SSE0    = gmx_and_pr(vcoul_SSE0,wco_SSE0);
            vcoul_SSE1    = gmx_and_pr(vcoul_SSE1,wco_SSE1);
            vcoul_SSE2    = gmx_and_pr(vcoul_SSE2,wco_SSE2);
            vcoul_SSE3    = gmx_and_pr(vcoul_SSE3,wco_SSE3);
#endif

#endif /* CALC_COULOMB */

#ifdef CALC_LJ
            /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
            wco_vdw_SSE0  = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
            wco_vdw_SSE1  = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
#ifndef HALF_LJ
            wco_vdw_SSE2  = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
            wco_vdw_SSE3  = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);
#endif
#else
            /* Same cut-off for Coulomb and VdW, reuse the registers */
#define     wco_vdw_SSE0    wco_SSE0
#define     wco_vdw_SSE1    wco_SSE1
#define     wco_vdw_SSE2    wco_SSE2
#define     wco_vdw_SSE3    wco_SSE3
#endif

#ifndef LJ_COMB_LB
            rinvsix_SSE0  = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
            rinvsix_SSE1  = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
#ifdef EXCL_FORCES
            rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,int_SSE0);
            rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
            rinvsix_SSE2  = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
            rinvsix_SSE3  = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
#ifdef EXCL_FORCES
            rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,int_SSE2);
            rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
            rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
            rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
            rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
            rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);
#endif
#endif
            FrLJ6_SSE0    = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
            FrLJ6_SSE1    = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
#ifndef HALF_LJ
            FrLJ6_SSE2    = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
            FrLJ6_SSE3    = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);
#endif
            FrLJ12_SSE0   = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
            FrLJ12_SSE1   = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
#ifndef HALF_LJ
            FrLJ12_SSE2   = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
            FrLJ12_SSE3   = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
#endif
#endif /* not LJ_COMB_LB */
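
            /* The C6 and C12 parameters loaded here carry the factors 6 and
             * 12 (they are stored that way in the nbat atom data, set up
             * outside this file), so FrLJ6 and FrLJ12 are directly force*r
             * contributions and the 1/6 and 1/12 factors only reappear in
             * the energies below.
             */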

#ifdef LJ_COMB_LB
            sir_SSE0      = gmx_mul_pr(sig_SSE0,rinv_SSE0);
            sir_SSE1      = gmx_mul_pr(sig_SSE1,rinv_SSE1);
#ifndef HALF_LJ
            sir_SSE2      = gmx_mul_pr(sig_SSE2,rinv_SSE2);
            sir_SSE3      = gmx_mul_pr(sig_SSE3,rinv_SSE3);
#endif
            sir2_SSE0     = gmx_mul_pr(sir_SSE0,sir_SSE0);
            sir2_SSE1     = gmx_mul_pr(sir_SSE1,sir_SSE1);
#ifndef HALF_LJ
            sir2_SSE2     = gmx_mul_pr(sir_SSE2,sir_SSE2);
            sir2_SSE3     = gmx_mul_pr(sir_SSE3,sir_SSE3);
#endif
            sir6_SSE0     = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
            sir6_SSE1     = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
#ifdef EXCL_FORCES
            sir6_SSE0     = gmx_and_pr(sir6_SSE0,int_SSE0);
            sir6_SSE1     = gmx_and_pr(sir6_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
            sir6_SSE2     = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
            sir6_SSE3     = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
#ifdef EXCL_FORCES
            sir6_SSE2     = gmx_and_pr(sir6_SSE2,int_SSE2);
            sir6_SSE3     = gmx_and_pr(sir6_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
            sir6_SSE0     = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
            sir6_SSE1     = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
            sir6_SSE2     = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
            sir6_SSE3     = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);
#endif
#endif
            FrLJ6_SSE0    = gmx_mul_pr(eps_SSE0,sir6_SSE0);
            FrLJ6_SSE1    = gmx_mul_pr(eps_SSE1,sir6_SSE1);
#ifndef HALF_LJ
            FrLJ6_SSE2    = gmx_mul_pr(eps_SSE2,sir6_SSE2);
            FrLJ6_SSE3    = gmx_mul_pr(eps_SSE3,sir6_SSE3);
#endif
            FrLJ12_SSE0   = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
            FrLJ12_SSE1   = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
#ifndef HALF_LJ
            FrLJ12_SSE2   = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
            FrLJ12_SSE3   = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);
#endif
#if defined CALC_ENERGIES
            /* We need C6 and C12 to calculate the LJ potential shift */
            sig2_SSE0     = gmx_mul_pr(sig_SSE0,sig_SSE0);
            sig2_SSE1     = gmx_mul_pr(sig_SSE1,sig_SSE1);
#ifndef HALF_LJ
            sig2_SSE2     = gmx_mul_pr(sig_SSE2,sig_SSE2);
            sig2_SSE3     = gmx_mul_pr(sig_SSE3,sig_SSE3);
#endif
            sig6_SSE0     = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
            sig6_SSE1     = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
#ifndef HALF_LJ
            sig6_SSE2     = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
            sig6_SSE3     = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));
#endif
            c6_SSE0       = gmx_mul_pr(eps_SSE0,sig6_SSE0);
            c6_SSE1       = gmx_mul_pr(eps_SSE1,sig6_SSE1);
#ifndef HALF_LJ
            c6_SSE2       = gmx_mul_pr(eps_SSE2,sig6_SSE2);
            c6_SSE3       = gmx_mul_pr(eps_SSE3,sig6_SSE3);
#endif
            c12_SSE0      = gmx_mul_pr(c6_SSE0,sig6_SSE0);
            c12_SSE1      = gmx_mul_pr(c6_SSE1,sig6_SSE1);
#ifndef HALF_LJ
            c12_SSE2      = gmx_mul_pr(c6_SSE2,sig6_SSE2);
            c12_SSE3      = gmx_mul_pr(c6_SSE3,sig6_SSE3);
#endif
#endif
#endif /* LJ_COMB_LB */
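
            /* Lorentz-Berthelot combination rule: the j data hold sigma_j/2
             * and sqrt(epsilon_j), so sig = (sigma_i+sigma_j)/2 and
             * eps = sqrt(eps_i*eps_j); the LJ terms are then built from eps
             * and (sig/r)^6, and c6/c12 are only reconstructed when the
             * potential shift is needed for the energies.
             */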

#endif /* CALC_LJ */

#ifdef CALC_ENERGIES
#ifdef ENERGY_GROUPS
            /* Extract the group pair index per j pair.
             * Energy groups are stored per i-cluster, so things get
             * complicated when the i- and j-cluster size don't match.
             */
            {
                int egps_j;
#if UNROLLJ == 2
                egps_j    = nbat->energrp[cj>>1];
                egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
                /* We assume UNROLLI <= UNROLLJ */
                int jdi;
                for(jdi=0; jdi<UNROLLJ/UNROLLI; jdi++)
                {
                    int jj;
                    egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
                    for(jj=0; jj<(UNROLLI/2); jj++)
                    {
                        egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
                    }
                }
#endif
            }
#endif

#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
            vctotSSE      = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));
#else
            add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
            add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
            add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
            add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);
#endif
#endif

#ifdef CALC_LJ
            /* Calculate the LJ energies */
            VLJ6_SSE0     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
            VLJ6_SSE1     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
#ifndef HALF_LJ
            VLJ6_SSE2     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
            VLJ6_SSE3     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));
#endif
            VLJ12_SSE0    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
            VLJ12_SSE1    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
#ifndef HALF_LJ
            VLJ12_SSE2    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
            VLJ12_SSE3    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));
#endif

            VLJ_SSE0      = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
            VLJ_SSE1      = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
#ifndef HALF_LJ
            VLJ_SSE2      = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
            VLJ_SSE3      = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);
#endif
            /* The potential shift should be removed for pairs beyond cut-off */
            VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
            VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
            VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
            VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);
#endif
#ifdef CHECK_EXCLS
            /* The potential shift should be removed for excluded pairs */
            VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,int_SSE0);
            VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,int_SSE1);
#ifndef HALF_LJ
            VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,int_SSE2);
            VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,int_SSE3);
#endif
#endif
#ifndef ENERGY_GROUPS
            VvdwtotSSE    = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                                       gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
#else
                                       gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
#endif
                                      );
#else
            add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
            add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
#ifndef HALF_LJ
            add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
            add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */

#ifdef CALC_LJ
            fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,
#ifdef CALC_COULOMB
                                                   gmx_add_pr(frcoul_SSE0,
#else
                                                   (
#endif
                                                    gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
            fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,
#ifdef CALC_COULOMB
                                                   gmx_add_pr(frcoul_SSE1,
#else
                                                   (
#endif
                                                    gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));
#else
            fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
            fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
            fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,
#ifdef CALC_COULOMB
                                                   gmx_add_pr(frcoul_SSE2,
#else
                                                   (
#endif
                                                    gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
            fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,
#ifdef CALC_COULOMB
                                                   gmx_add_pr(frcoul_SSE3,
#else
                                                   (
#endif
                                                    gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
#else
            /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
            fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
            fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
#endif
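
            /* frcoul and FrLJ6/FrLJ12 are force*r contributions (see above),
             * so multiplying by rinvsq gives fscal = force/r; multiplying
             * that by the Cartesian distance components below yields the
             * force vector without ever forming r explicitly.
             */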

            /* Calculate temporary vectorial force */
            tx_SSE0       = gmx_mul_pr(fscal_SSE0,dx_SSE0);
            tx_SSE1       = gmx_mul_pr(fscal_SSE1,dx_SSE1);
            tx_SSE2       = gmx_mul_pr(fscal_SSE2,dx_SSE2);
            tx_SSE3       = gmx_mul_pr(fscal_SSE3,dx_SSE3);
            ty_SSE0       = gmx_mul_pr(fscal_SSE0,dy_SSE0);
            ty_SSE1       = gmx_mul_pr(fscal_SSE1,dy_SSE1);
            ty_SSE2       = gmx_mul_pr(fscal_SSE2,dy_SSE2);
            ty_SSE3       = gmx_mul_pr(fscal_SSE3,dy_SSE3);
            tz_SSE0       = gmx_mul_pr(fscal_SSE0,dz_SSE0);
            tz_SSE1       = gmx_mul_pr(fscal_SSE1,dz_SSE1);
            tz_SSE2       = gmx_mul_pr(fscal_SSE2,dz_SSE2);
            tz_SSE3       = gmx_mul_pr(fscal_SSE3,dz_SSE3);

            /* Increment i atom force */
            fix_SSE0      = gmx_add_pr(fix_SSE0,tx_SSE0);
            fix_SSE1      = gmx_add_pr(fix_SSE1,tx_SSE1);
            fix_SSE2      = gmx_add_pr(fix_SSE2,tx_SSE2);
            fix_SSE3      = gmx_add_pr(fix_SSE3,tx_SSE3);
            fiy_SSE0      = gmx_add_pr(fiy_SSE0,ty_SSE0);
            fiy_SSE1      = gmx_add_pr(fiy_SSE1,ty_SSE1);
            fiy_SSE2      = gmx_add_pr(fiy_SSE2,ty_SSE2);
            fiy_SSE3      = gmx_add_pr(fiy_SSE3,ty_SSE3);
            fiz_SSE0      = gmx_add_pr(fiz_SSE0,tz_SSE0);
            fiz_SSE1      = gmx_add_pr(fiz_SSE1,tz_SSE1);
            fiz_SSE2      = gmx_add_pr(fiz_SSE2,tz_SSE2);
            fiz_SSE3      = gmx_add_pr(fiz_SSE3,tz_SSE3);

            /* Decrement j atom force */
            gmx_store_pr(f+ajx,
                         gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
            gmx_store_pr(f+ajy,
                         gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
            gmx_store_pr(f+ajz,
                         gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
        }

#undef  rinv_ex_SSE0
#undef  rinv_ex_SSE1
#undef  rinv_ex_SSE2
#undef  rinv_ex_SSE3

#undef  wco_vdw_SSE0
#undef  wco_vdw_SSE1
#undef  wco_vdw_SSE2
#undef  wco_vdw_SSE3

#undef  CUTOFF_BLENDV

#undef  EXCL_FORCES