Fixing copyright issues and code contributors
[alexxy/gromacs.git] / src / mdlib / nbnxn_kernels / nbnxn_kernel_simd_4xn_inner.h
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2009, The GROMACS Development Team
6  * Copyright (c) 2012,2013, by the GROMACS development team, led by
7  * David van der Spoel, Berk Hess, Erik Lindahl, and including many
8  * others, as listed in the AUTHORS file in the top-level source
9  * directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37
38 /* This is the innermost loop contents for the 4 x N atom SIMD kernel.
39  * This flavor of the kernel calculates interactions of 4 i-atoms
40  * with N j-atoms stored in N-wide SIMD registers.
41  */
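/* Illustrative scalar reference of what one pass of this inner loop
 * computes (a sketch for orientation only, not compiled; the plain-C
 * arrays xi/xj, fi/fj and the helpers excluded()/f_over_r() are
 * hypothetical names, not part of this file):
 *
 *   for (i = 0; i < UNROLLI; i++)            // the 4 i-atoms
 *   {
 *       for (j = 0; j < UNROLLJ; j++)        // the j-atoms of cluster cj
 *       {
 *           dx = xi[i] - xj[j];  dy = yi[i] - yj[j];  dz = zi[i] - zj[j];
 *           r2 = dx*dx + dy*dy + dz*dz;
 *           if (r2 < rc2 && !excluded(i, j))
 *           {
 *               fs = f_over_r(r2);           // Coulomb and/or LJ, F/r
 *               fi[i] += fs*dx;  fj[j] -= fs*dx;   // likewise for y and z
 *           }
 *       }
 *   }
 *
 * In the SIMD code below the j-loop is one register wide: each line of
 * the i-loop corresponds to one of the _SSE0.._SSE3 variables.
 */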
42
43
44 /* When calculating RF or Ewald interactions we calculate the electrostatic
45  * forces on excluded atom pairs here in the non-bonded loops.
46  * But when energies and/or the virial are required we calculate them
47  * separately, as it is then easier to separate the energy and virial
48  * contributions.
49  */
50 #if defined CHECK_EXCLS && defined CALC_COULOMB
51 #define EXCL_FORCES
52 #endif
53
54 /* Without exclusions and energies we only need to mask the cut-off;
55  * this can be faster with blendv (only available with SSE4.1 and later).
56  */
57 #if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
58 /* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
59  * With gcc this is slower, except for RF on Sandy Bridge.
60  * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
61  */
62 #if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
63 #define CUTOFF_BLENDV
64 #endif
65 /* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
66  * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
67  * Tested with icc 13.
68  */
69 #if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
70 #define CUTOFF_BLENDV
71 #endif
72 #endif
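/* Sketch of the two masking variants referred to above (illustrative
 * only, single-precision SSE intrinsics; not part of the kernel):
 *
 *   // cmp+and: build an all-ones/all-zeros mask and AND it in
 *   __m128 wco = _mm_cmplt_ps(rsq, rc2);
 *   rinv       = _mm_and_ps(rinv, wco);
 *
 *   // sub+blendv: select per element on the sign of (rc2 - rsq)
 *   rinv       = _mm_blendv_ps(rinv, zero, _mm_sub_ps(rc2, rsq));
 *
 * Both zero rinv outside the cut-off; blendv avoids the explicit mask
 * register but requires SSE4.1.
 */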
73
74         {
75             int        cj,aj,ajx,ajy,ajz;
76
77 #ifdef ENERGY_GROUPS
78             /* Energy group indices for two atoms packed into one int */
79             int        egp_jj[UNROLLJ/2];
80 #endif
81
82 #ifdef CHECK_EXCLS
83             /* Interaction (non-exclusion) mask of all 1's or 0's */
84             gmx_mm_pr  int_SSE0;
85             gmx_mm_pr  int_SSE1;
86             gmx_mm_pr  int_SSE2;
87             gmx_mm_pr  int_SSE3;
88 #endif
89
90             gmx_mm_pr  jxSSE,jySSE,jzSSE;
91             gmx_mm_pr  dx_SSE0,dy_SSE0,dz_SSE0;
92             gmx_mm_pr  dx_SSE1,dy_SSE1,dz_SSE1;
93             gmx_mm_pr  dx_SSE2,dy_SSE2,dz_SSE2;
94             gmx_mm_pr  dx_SSE3,dy_SSE3,dz_SSE3;
95             gmx_mm_pr  tx_SSE0,ty_SSE0,tz_SSE0;
96             gmx_mm_pr  tx_SSE1,ty_SSE1,tz_SSE1;
97             gmx_mm_pr  tx_SSE2,ty_SSE2,tz_SSE2;
98             gmx_mm_pr  tx_SSE3,ty_SSE3,tz_SSE3;
99             gmx_mm_pr  rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
100             gmx_mm_pr  rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
101             gmx_mm_pr  rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
102             gmx_mm_pr  rsq_SSE3,rinv_SSE3,rinvsq_SSE3;
103 #ifndef CUTOFF_BLENDV
104             /* wco: within cut-off, mask of all 1's or 0's */
105             gmx_mm_pr  wco_SSE0;
106             gmx_mm_pr  wco_SSE1;
107             gmx_mm_pr  wco_SSE2;
108             gmx_mm_pr  wco_SSE3;
109 #endif
110 #ifdef VDW_CUTOFF_CHECK
111             gmx_mm_pr  wco_vdw_SSE0;
112             gmx_mm_pr  wco_vdw_SSE1;
113 #ifndef HALF_LJ
114             gmx_mm_pr  wco_vdw_SSE2;
115             gmx_mm_pr  wco_vdw_SSE3;
116 #endif
117 #endif
118 #ifdef CALC_COULOMB
119 #ifdef CHECK_EXCLS
120             /* 1/r masked with the interaction mask */
121             gmx_mm_pr  rinv_ex_SSE0;
122             gmx_mm_pr  rinv_ex_SSE1;
123             gmx_mm_pr  rinv_ex_SSE2;
124             gmx_mm_pr  rinv_ex_SSE3;
125 #endif
126             gmx_mm_pr  jq_SSE;
127             gmx_mm_pr  qq_SSE0;
128             gmx_mm_pr  qq_SSE1;
129             gmx_mm_pr  qq_SSE2;
130             gmx_mm_pr  qq_SSE3;
131 #ifdef CALC_COUL_TAB
132             /* The force (PME mesh force) we need to subtract from 1/r^2 */
133             gmx_mm_pr  fsub_SSE0;
134             gmx_mm_pr  fsub_SSE1;
135             gmx_mm_pr  fsub_SSE2;
136             gmx_mm_pr  fsub_SSE3;
137 #endif
138 #ifdef CALC_COUL_EWALD
139             gmx_mm_pr  brsq_SSE0,brsq_SSE1,brsq_SSE2,brsq_SSE3;
140             gmx_mm_pr  ewcorr_SSE0,ewcorr_SSE1,ewcorr_SSE2,ewcorr_SSE3;
141 #endif
142
143             /* frcoul = (1/r - fsub)*r */
144             gmx_mm_pr  frcoul_SSE0;
145             gmx_mm_pr  frcoul_SSE1;
146             gmx_mm_pr  frcoul_SSE2;
147             gmx_mm_pr  frcoul_SSE3;
148 #ifdef CALC_COUL_TAB
149             /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
150             gmx_mm_pr  r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
151             gmx_mm_pr  r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
152             gmx_mm_pr  r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
153             gmx_mm_pr  r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
154             /* Table index: rs truncated to an int */
155 #if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
156             gmx_epi32  ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
157 #else
158             __m128i    ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
159 #endif
160             /* Linear force table values */
161             gmx_mm_pr  ctab0_SSE0,ctab1_SSE0;
162             gmx_mm_pr  ctab0_SSE1,ctab1_SSE1;
163             gmx_mm_pr  ctab0_SSE2,ctab1_SSE2;
164             gmx_mm_pr  ctab0_SSE3,ctab1_SSE3;
165 #ifdef CALC_ENERGIES
166             /* Quadratic energy table value */
167             gmx_mm_pr  ctabv_SSE0;
168             gmx_mm_pr  ctabv_SSE1;
169             gmx_mm_pr  ctabv_SSE2;
170             gmx_mm_pr  ctabv_SSE3;
171 #endif
172 #endif
173 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
174             /* The potential (PME mesh) we need to subtract from 1/r */
175             gmx_mm_pr  vc_sub_SSE0;
176             gmx_mm_pr  vc_sub_SSE1;
177             gmx_mm_pr  vc_sub_SSE2;
178             gmx_mm_pr  vc_sub_SSE3;
179 #endif
180 #ifdef CALC_ENERGIES
181             /* Electrostatic potential */
182             gmx_mm_pr  vcoul_SSE0;
183             gmx_mm_pr  vcoul_SSE1;
184             gmx_mm_pr  vcoul_SSE2;
185             gmx_mm_pr  vcoul_SSE3;
186 #endif
187 #endif
188             /* The force times 1/r */
189             gmx_mm_pr  fscal_SSE0;
190             gmx_mm_pr  fscal_SSE1;
191             gmx_mm_pr  fscal_SSE2;
192             gmx_mm_pr  fscal_SSE3;
193
194 #ifdef CALC_LJ
195 #ifdef LJ_COMB_LB
196             /* LJ sigma_j/2 and sqrt(epsilon_j) */
197             gmx_mm_pr  hsig_j_SSE,seps_j_SSE;
198             /* LJ sigma_ij and epsilon_ij */
199             gmx_mm_pr  sig_SSE0,eps_SSE0;
200             gmx_mm_pr  sig_SSE1,eps_SSE1;
201 #ifndef HALF_LJ
202             gmx_mm_pr  sig_SSE2,eps_SSE2;
203             gmx_mm_pr  sig_SSE3,eps_SSE3;
204 #endif
205 #ifdef CALC_ENERGIES
206             gmx_mm_pr  sig2_SSE0,sig6_SSE0;
207             gmx_mm_pr  sig2_SSE1,sig6_SSE1;
208 #ifndef HALF_LJ
209             gmx_mm_pr  sig2_SSE2,sig6_SSE2;
210             gmx_mm_pr  sig2_SSE3,sig6_SSE3;
211 #endif
212 #endif /* CALC_ENERGIES */
213 #endif /* LJ_COMB_LB */
214
215 #ifdef LJ_COMB_GEOM
216             gmx_mm_pr  c6s_j_SSE,c12s_j_SSE;
217 #endif
218
219 #if defined LJ_COMB_GEOM || defined LJ_COMB_LB
220             /* Index for loading LJ parameters, complicated when interleaving */
221             int         aj2;
222 #endif
223
224 #ifndef FIX_LJ_C
225             /* LJ C6 and C12 parameters, used with geometric comb. rule */
226             gmx_mm_pr  c6_SSE0,c12_SSE0;
227             gmx_mm_pr  c6_SSE1,c12_SSE1;
228 #ifndef HALF_LJ
229             gmx_mm_pr  c6_SSE2,c12_SSE2;
230             gmx_mm_pr  c6_SSE3,c12_SSE3;
231 #endif
232 #endif
233
234             /* Intermediate variables for LJ calculation */
235 #ifndef LJ_COMB_LB
236             gmx_mm_pr  rinvsix_SSE0;
237             gmx_mm_pr  rinvsix_SSE1;
238 #ifndef HALF_LJ
239             gmx_mm_pr  rinvsix_SSE2;
240             gmx_mm_pr  rinvsix_SSE3;
241 #endif
242 #endif
243 #ifdef LJ_COMB_LB
244             gmx_mm_pr  sir_SSE0,sir2_SSE0,sir6_SSE0;
245             gmx_mm_pr  sir_SSE1,sir2_SSE1,sir6_SSE1;
246 #ifndef HALF_LJ
247             gmx_mm_pr  sir_SSE2,sir2_SSE2,sir6_SSE2;
248             gmx_mm_pr  sir_SSE3,sir2_SSE3,sir6_SSE3;
249 #endif
250 #endif
251
252             gmx_mm_pr  FrLJ6_SSE0,FrLJ12_SSE0;
253             gmx_mm_pr  FrLJ6_SSE1,FrLJ12_SSE1;
254 #ifndef HALF_LJ
255             gmx_mm_pr  FrLJ6_SSE2,FrLJ12_SSE2;
256             gmx_mm_pr  FrLJ6_SSE3,FrLJ12_SSE3;
257 #endif
258 #ifdef CALC_ENERGIES
259             gmx_mm_pr  VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
260             gmx_mm_pr  VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
261 #ifndef HALF_LJ
262             gmx_mm_pr  VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
263             gmx_mm_pr  VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;
264 #endif
265 #endif
266 #endif /* CALC_LJ */
267
268             /* j-cluster index */
269             cj            = l_cj[cjind].cj;
270
271             /* Atom indices (of the first atom in the cluster) */
272             aj            = cj*UNROLLJ;
273 #if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
274 #if UNROLLJ == STRIDE
275             aj2           = aj*2;
276 #else
277             aj2           = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
278 #endif
279 #endif
280 #if UNROLLJ == STRIDE
281             ajx           = aj*DIM;
282 #else
283             ajx           = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
284 #endif
285             ajy           = ajx + STRIDE;
286             ajz           = ajy + STRIDE;
287
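            /* The index arithmetic above assumes the packed coordinate
             * layout of x: per block of STRIDE atoms all x values come
             * first, then all y, then all z, hence ajy = ajx + STRIDE and
             * ajz = ajy + STRIDE. When UNROLLJ != STRIDE two j-clusters
             * share one block and the (cj>>1)/(cj&1) terms pick the half.
             */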
288 #ifdef CHECK_EXCLS
289 #if defined GMX_X86_SSE2 && defined GMX_MM128_HERE
290             {
291                 /* Load integer interaction mask */
292                 __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);
293
294                 int_SSE0  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
295                 int_SSE1  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
296                 int_SSE2  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
297                 int_SSE3  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
298             }
299 #endif
300 #if defined GMX_X86_SSE2 && defined GMX_MM256_HERE
301             {
302 #ifndef GMX_DOUBLE
303                 /* Load integer interaction mask */
304                 /* With AVX there are no integer operations, so cast to real */
305                 gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
306                 /* We can't compare all 4*8=32 float bits: shift the mask */
307                 gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
308                 /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
309                  * With gcc the cast is not needed for correctness, but it is faster.
310                  */
311 #define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
312                 int_SSE0  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
313                 int_SSE1  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
314                 int_SSE2  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
315                 int_SSE3  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
316 #undef cast_cvt
317 #else
318                 /* Load integer interaction mask */
319                 /* With AVX there are no integer operations,
320                  * and there is no int to double conversion, so cast to float
321                  */
322                 __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
323 #define cast_cvt(x)  _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
324                 int_SSE0  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
325                 int_SSE1  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
326                 int_SSE2  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
327                 int_SSE3  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
328 #undef cast_cvt
329 #endif
330             }
331 #endif
332 #endif
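            /* The masks loaded above expand the per-pair exclusion bits in
             * l_cj[cjind].excl into full-width SIMD masks. Scalar sketch
             * (illustrative only): with the bit for i-atom i and j-lane j
             * at position i*UNROLLJ + j, as the shift by 2*UNROLLJ above
             * suggests,
             *
             *   bit  = (l_cj[cjind].excl >> (i*UNROLLJ + j)) & 1;
             *   lane j of int_SSEi = bit ? all-ones : 0;
             *
             * The SSE2 path does this with andnot+cmpeq against the per-i
             * bit masks mask0..mask3; the AVX paths go through float/int
             * casts because 256-bit integer compares are not available.
             */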
333             /* load j atom coordinates */
334             jxSSE         = gmx_load_pr(x+ajx);
335             jySSE         = gmx_load_pr(x+ajy);
336             jzSSE         = gmx_load_pr(x+ajz);
337
338             /* Calculate distance */
339             dx_SSE0       = gmx_sub_pr(ix_SSE0,jxSSE);
340             dy_SSE0       = gmx_sub_pr(iy_SSE0,jySSE);
341             dz_SSE0       = gmx_sub_pr(iz_SSE0,jzSSE);
342             dx_SSE1       = gmx_sub_pr(ix_SSE1,jxSSE);
343             dy_SSE1       = gmx_sub_pr(iy_SSE1,jySSE);
344             dz_SSE1       = gmx_sub_pr(iz_SSE1,jzSSE);
345             dx_SSE2       = gmx_sub_pr(ix_SSE2,jxSSE);
346             dy_SSE2       = gmx_sub_pr(iy_SSE2,jySSE);
347             dz_SSE2       = gmx_sub_pr(iz_SSE2,jzSSE);
348             dx_SSE3       = gmx_sub_pr(ix_SSE3,jxSSE);
349             dy_SSE3       = gmx_sub_pr(iy_SSE3,jySSE);
350             dz_SSE3       = gmx_sub_pr(iz_SSE3,jzSSE);
351
352             /* rsq = dx*dx+dy*dy+dz*dz */
353             rsq_SSE0      = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
354             rsq_SSE1      = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
355             rsq_SSE2      = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
356             rsq_SSE3      = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);
357
358 #ifndef CUTOFF_BLENDV
359             wco_SSE0      = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
360             wco_SSE1      = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
361             wco_SSE2      = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
362             wco_SSE3      = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);
363 #endif
364
365 #ifdef CHECK_EXCLS
366 #ifdef EXCL_FORCES
367             /* Only remove the (sub-)diagonal to avoid double counting */
368 #if UNROLLJ == UNROLLI
369             if (cj == ci_sh)
370             {
371                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag_SSE0);
372                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag_SSE1);
373                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag_SSE2);
374                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag_SSE3);
375             }
376 #else
377 #if UNROLLJ < UNROLLI
378             if (cj == ci_sh*2)
379             {
380                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
381                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
382                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
383                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
384             }
385             if (cj == ci_sh*2 + 1)
386             { 
387                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
388                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
389                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
390                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
391             }
392 #else
393             if (cj*2 == ci_sh)
394             {
395                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
396                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
397                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
398                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
399             }
400             else if (cj*2 + 1 == ci_sh)
401             {
402                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
403                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
404                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
405                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
406             }
407 #endif
408 #endif
409 #else /* EXCL_FORCES */
410             /* Remove all excluded atom pairs from the list */
411             wco_SSE0      = gmx_and_pr(wco_SSE0,int_SSE0);
412             wco_SSE1      = gmx_and_pr(wco_SSE1,int_SSE1);
413             wco_SSE2      = gmx_and_pr(wco_SSE2,int_SSE2);
414             wco_SSE3      = gmx_and_pr(wco_SSE3,int_SSE3);
415 #endif
416 #endif
417
418 #ifdef COUNT_PAIRS
419             {
420                 int i,j;
421                 real tmp[UNROLLJ];
422                 for(i=0; i<UNROLLI; i++)
423                 {
424                     gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
425                     for(j=0; j<UNROLLJ; j++)
426                     {
427                         if (!(tmp[j] == 0))
428                         {
429                             npair++;
430                         }
431                     }
432                 }
433             }
434 #endif
435
436 #ifdef CHECK_EXCLS
437             /* For excluded pairs add a small number to avoid r^-6 = NaN */
438             rsq_SSE0      = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
439             rsq_SSE1      = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
440             rsq_SSE2      = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
441             rsq_SSE3      = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
442 #endif
443
444             /* Calculate 1/r */
445 #ifndef GMX_DOUBLE
446             rinv_SSE0     = gmx_invsqrt_pr(rsq_SSE0);
447             rinv_SSE1     = gmx_invsqrt_pr(rsq_SSE1);
448             rinv_SSE2     = gmx_invsqrt_pr(rsq_SSE2);
449             rinv_SSE3     = gmx_invsqrt_pr(rsq_SSE3);
450 #else
451             GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
452             GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);
453 #endif
454
455 #ifdef CALC_COULOMB
456             /* Load parameters for j atom */
457             jq_SSE        = gmx_load_pr(q+aj);
458             qq_SSE0       = gmx_mul_pr(iq_SSE0,jq_SSE);
459             qq_SSE1       = gmx_mul_pr(iq_SSE1,jq_SSE);
460             qq_SSE2       = gmx_mul_pr(iq_SSE2,jq_SSE);
461             qq_SSE3       = gmx_mul_pr(iq_SSE3,jq_SSE);
462 #endif
463
464 #ifdef CALC_LJ
465
466 #if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
467             load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
468             load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
469 #ifndef HALF_LJ
470             load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
471             load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
472 #endif
473 #endif /* not defined any LJ rule */
474
475 #ifdef LJ_COMB_GEOM
476             c6s_j_SSE     = gmx_load_pr(ljc+aj2+0);
477             c12s_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
478             c6_SSE0       = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
479             c6_SSE1       = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
480 #ifndef HALF_LJ
481             c6_SSE2       = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
482             c6_SSE3       = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
483 #endif
484             c12_SSE0      = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
485             c12_SSE1      = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
486 #ifndef HALF_LJ
487             c12_SSE2      = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
488             c12_SSE3      = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
489 #endif
490 #endif /* LJ_COMB_GEOM */
491
492 #ifdef LJ_COMB_LB
493             hsig_j_SSE    = gmx_load_pr(ljc+aj2+0);
494             seps_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
495
496             sig_SSE0      = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
497             sig_SSE1      = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
498             eps_SSE0      = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
499             eps_SSE1      = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
500 #ifndef HALF_LJ
501             sig_SSE2      = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
502             sig_SSE3      = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
503             eps_SSE2      = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
504             eps_SSE3      = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
505 #endif
506 #endif /* LJ_COMB_LB */
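            /* Lorentz-Berthelot combination sketch: with hsig = sigma/2 and
             * seps = sqrt(epsilon) stored per atom (see the declarations
             * above),
             *
             *   sig = hsig_i + hsig_j = (sigma_i + sigma_j)/2
             *   eps = seps_i * seps_j = sqrt(epsilon_i*epsilon_j)
             *
             * i.e. the arithmetic mean of sigma and the geometric mean of
             * epsilon.
             */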
507
508 #endif /* CALC_LJ */
509
510 #ifndef CUTOFF_BLENDV
511             rinv_SSE0     = gmx_and_pr(rinv_SSE0,wco_SSE0);
512             rinv_SSE1     = gmx_and_pr(rinv_SSE1,wco_SSE1);
513             rinv_SSE2     = gmx_and_pr(rinv_SSE2,wco_SSE2);
514             rinv_SSE3     = gmx_and_pr(rinv_SSE3,wco_SSE3);
515 #else
516             /* We only need to mask for the cut-off: blendv is faster */
517             rinv_SSE0     = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
518             rinv_SSE1     = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
519             rinv_SSE2     = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
520             rinv_SSE3     = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
521 #endif
522
523             rinvsq_SSE0   = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
524             rinvsq_SSE1   = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
525             rinvsq_SSE2   = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
526             rinvsq_SSE3   = gmx_mul_pr(rinv_SSE3,rinv_SSE3);
527
528 #ifdef CALC_COULOMB
529             /* Note that here we calculate force*r, not the usual force/r.
530              * This allows avoiding masking the reaction-field contribution,
531              * as frcoul is later multiplied by rinvsq which has been
532              * masked with the cut-off check.
533              */
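            /* In scalar terms the identity used here is
             *
             *   fscal = frcoul*rinvsq = (F*r)*(1/r^2) = F/r
             *
             * and since rinv (hence rinvsq) is already zero outside the
             * cut-off, frcoul itself needs no separate cut-off mask.
             */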
534
535 #ifdef EXCL_FORCES
536             /* Only add 1/r for non-excluded atom pairs */
537             rinv_ex_SSE0  = gmx_and_pr(rinv_SSE0,int_SSE0);
538             rinv_ex_SSE1  = gmx_and_pr(rinv_SSE1,int_SSE1);
539             rinv_ex_SSE2  = gmx_and_pr(rinv_SSE2,int_SSE2);
540             rinv_ex_SSE3  = gmx_and_pr(rinv_SSE3,int_SSE3);
541 #else
542             /* No exclusion forces, we always need 1/r */
543 #define     rinv_ex_SSE0    rinv_SSE0
544 #define     rinv_ex_SSE1    rinv_SSE1
545 #define     rinv_ex_SSE2    rinv_SSE2
546 #define     rinv_ex_SSE3    rinv_SSE3
547 #endif
548
549 #ifdef CALC_COUL_RF
550             /* Electrostatic interactions */
551             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
552             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
553             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
554             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));
555
556 #ifdef CALC_ENERGIES
557             vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
558             vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
559             vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
560             vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));
561 #endif
562 #endif
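            /* Reaction-field reference for the expressions above, assuming
             * the precomputed constants hold mrc_3 = -2*k_rf, hrc_3 = k_rf
             * and moh_rc = -c_rf (they are set up outside this file):
             *
             *   F*r = qq*(1/r - 2*k_rf*r^2)
             *   V   = qq*(1/r +   k_rf*r^2 - c_rf)
             *
             * i.e. the standard reaction-field force and potential.
             */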
563
564 #ifdef CALC_COUL_EWALD
565             /* We need to mask (or limit) rsq for the cut-off,
566              * as large distances can cause an overflow in gmx_pmecorrF/V.
567              */
568 #ifndef CUTOFF_BLENDV
569             brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE0,wco_SSE0));
570             brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE1,wco_SSE1));
571             brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE2,wco_SSE2));
572             brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE3,wco_SSE3));
573 #else
574             /* Strangely, putting mul on a separate line is slower (icc 13) */
575             brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0)));
576             brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1)));
577             brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2)));
578             brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3)));
579 #endif
580             ewcorr_SSE0   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0),beta_SSE);
581             ewcorr_SSE1   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1),beta_SSE);
582             ewcorr_SSE2   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2),beta_SSE);
583             ewcorr_SSE3   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3),beta_SSE);
584             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(ewcorr_SSE0,brsq_SSE0)));
585             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(ewcorr_SSE1,brsq_SSE1)));
586             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(ewcorr_SSE2,brsq_SSE2)));
587             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(ewcorr_SSE3,brsq_SSE3)));
588
589 #ifdef CALC_ENERGIES
590             vc_sub_SSE0   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0),beta_SSE);
591             vc_sub_SSE1   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1),beta_SSE);
592             vc_sub_SSE2   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2),beta_SSE);
593             vc_sub_SSE3   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3),beta_SSE);
594 #endif
595
596 #endif /* CALC_COUL_EWALD */
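            /* For reference, the real-space Ewald force targeted here is
             *
             *   F(r) = qq*( erfc(beta*r)/r^2
             *               + (2*beta/sqrt(pi))*exp(-beta^2*r^2)/r )
             *
             * evaluated as the plain 1/r^2 Coulomb force plus a correction
             * that depends only on beta^2*r^2 (brsq). gmx_pmecorrF supplies
             * that correction (up to the beta factors applied above) as a
             * polynomial approximation, which is why brsq has to be limited
             * to the cut-off range to avoid overflow.
             */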
597
598 #ifdef CALC_COUL_TAB
599             /* Electrostatic interactions */
600             r_SSE0        = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
601             r_SSE1        = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
602             r_SSE2        = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
603             r_SSE3        = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
604             /* Convert r to scaled table units */
605             rs_SSE0       = gmx_mul_pr(r_SSE0,invtsp_SSE);
606             rs_SSE1       = gmx_mul_pr(r_SSE1,invtsp_SSE);
607             rs_SSE2       = gmx_mul_pr(r_SSE2,invtsp_SSE);
608             rs_SSE3       = gmx_mul_pr(r_SSE3,invtsp_SSE);
609             /* Truncate scaled r to an int */
610             ti_SSE0       = gmx_cvttpr_epi32(rs_SSE0);
611             ti_SSE1       = gmx_cvttpr_epi32(rs_SSE1);
612             ti_SSE2       = gmx_cvttpr_epi32(rs_SSE2);
613             ti_SSE3       = gmx_cvttpr_epi32(rs_SSE3);
614 #ifdef GMX_X86_SSE4_1
615             /* SSE4.1 floor is faster than the gmx_cvtepi32_pr int->real conversion */
616             rf_SSE0       = gmx_floor_pr(rs_SSE0);
617             rf_SSE1       = gmx_floor_pr(rs_SSE1);
618             rf_SSE2       = gmx_floor_pr(rs_SSE2);
619             rf_SSE3       = gmx_floor_pr(rs_SSE3);
620 #else
621             rf_SSE0       = gmx_cvtepi32_pr(ti_SSE0);
622             rf_SSE1       = gmx_cvtepi32_pr(ti_SSE1);
623             rf_SSE2       = gmx_cvtepi32_pr(ti_SSE2);
624             rf_SSE3       = gmx_cvtepi32_pr(ti_SSE3);
625 #endif
626             frac_SSE0     = gmx_sub_pr(rs_SSE0,rf_SSE0);
627             frac_SSE1     = gmx_sub_pr(rs_SSE1,rf_SSE1);
628             frac_SSE2     = gmx_sub_pr(rs_SSE2,rf_SSE2);
629             frac_SSE3     = gmx_sub_pr(rs_SSE3,rf_SSE3);
630
631             /* Load and interpolate table forces and possibly energies.
632              * Force and energy can be combined in one table, stride 4: FDV0,
633              * or stored in two separate tables with stride 1: F and V.
634              * Currently single precision uses FDV0, double precision F and V.
635              */
636 #ifndef CALC_ENERGIES
637             load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
638             load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
639             load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
640             load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);
641 #else
642 #ifdef TAB_FDV0
643             load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
644             load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
645             load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
646             load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
647 #else
648             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
649             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
650             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
651             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
652 #endif
653 #endif
654             fsub_SSE0     = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
655             fsub_SSE1     = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
656             fsub_SSE2     = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
657             fsub_SSE3     = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
658             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
659             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
660             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
661             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));
662
663 #ifdef CALC_ENERGIES
664             vc_sub_SSE0   = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
665             vc_sub_SSE1   = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
666             vc_sub_SSE2   = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
667             vc_sub_SSE3   = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));
668 #endif
669 #endif /* CALC_COUL_TAB */
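            /* Scalar sketch of the table interpolation above (illustrative;
             * sp is the table spacing, so invtsp = 1/sp and mhalfsp is
             * assumed to hold -sp/2):
             *
             *   rs   = r/sp;  i = (int)rs;  frac = rs - i;
             *   fsub = F[i] + frac*(F[i+1] - F[i]);        // linear force
             *   F*r  = qq*(1/r - fsub*r);                  // frcoul above
             *   vsub = V[i] - (sp/2)*frac*(F[i] + fsub);   // trapezoid V
             *
             * With the FDV0 layout ctab0 and ctab1 hold F[i] and the
             * difference F[i+1]-F[i] directly; ctabv corresponds to V[i].
             */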
670
671 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
672 #ifndef NO_SHIFT_EWALD
673             /* Add Ewald potential shift to vc_sub for convenience */
674 #ifdef CHECK_EXCLS
675             vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
676             vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
677             vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
678             vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));
679 #else
680             vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
681             vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
682             vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
683             vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);
684 #endif
685 #endif
686             
687             vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
688             vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
689             vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
690             vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));
691
692 #endif
693
694 #ifdef CALC_ENERGIES
695             /* Mask energy for cut-off and diagonal */
696             vcoul_SSE0    = gmx_and_pr(vcoul_SSE0,wco_SSE0);
697             vcoul_SSE1    = gmx_and_pr(vcoul_SSE1,wco_SSE1);
698             vcoul_SSE2    = gmx_and_pr(vcoul_SSE2,wco_SSE2);
699             vcoul_SSE3    = gmx_and_pr(vcoul_SSE3,wco_SSE3);
700 #endif
701
702 #endif /* CALC_COULOMB */
703
704 #ifdef CALC_LJ
705             /* Lennard-Jones interaction */
706
707 #ifdef VDW_CUTOFF_CHECK
708             wco_vdw_SSE0  = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
709             wco_vdw_SSE1  = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
710 #ifndef HALF_LJ
711             wco_vdw_SSE2  = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
712             wco_vdw_SSE3  = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);
713 #endif
714 #else
715             /* Same cut-off for Coulomb and VdW, reuse the registers */
716 #define     wco_vdw_SSE0    wco_SSE0
717 #define     wco_vdw_SSE1    wco_SSE1
718 #define     wco_vdw_SSE2    wco_SSE2
719 #define     wco_vdw_SSE3    wco_SSE3
720 #endif
721
722 #ifndef LJ_COMB_LB
723             rinvsix_SSE0  = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
724             rinvsix_SSE1  = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
725 #ifdef EXCL_FORCES
726             rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,int_SSE0);
727             rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,int_SSE1);
728 #endif
729 #ifndef HALF_LJ
730             rinvsix_SSE2  = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
731             rinvsix_SSE3  = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
732 #ifdef EXCL_FORCES
733             rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,int_SSE2);
734             rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,int_SSE3);
735 #endif
736 #endif
737 #ifdef VDW_CUTOFF_CHECK
738             rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
739             rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
740 #ifndef HALF_LJ
741             rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
742             rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);
743 #endif
744 #endif
745             FrLJ6_SSE0    = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
746             FrLJ6_SSE1    = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
747 #ifndef HALF_LJ
748             FrLJ6_SSE2    = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
749             FrLJ6_SSE3    = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);
750 #endif
751             FrLJ12_SSE0   = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
752             FrLJ12_SSE1   = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
753 #ifndef HALF_LJ
754             FrLJ12_SSE2   = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
755             FrLJ12_SSE3   = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
756 #endif
757 #endif /* not LJ_COMB_LB */
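            /* Note on the c6/c12 convention: the 1/6 and 1/12 factors in
             * the energy expressions further down suggest that the c6/c12
             * values used here already include the conventional factors
             * 6 and 12, so that
             *
             *   FrLJ12 - FrLJ6 = 12*C12/r^12 - 6*C6/r^6 = F_LJ*r
             *
             * can be added to frcoul directly before the final 1/r^2
             * multiplication (an inference from this file, not a statement
             * about the parameter storage elsewhere).
             */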
758
759 #ifdef LJ_COMB_LB
760             sir_SSE0      = gmx_mul_pr(sig_SSE0,rinv_SSE0);
761             sir_SSE1      = gmx_mul_pr(sig_SSE1,rinv_SSE1);
762 #ifndef HALF_LJ
763             sir_SSE2      = gmx_mul_pr(sig_SSE2,rinv_SSE2);
764             sir_SSE3      = gmx_mul_pr(sig_SSE3,rinv_SSE3);
765 #endif
766             sir2_SSE0     = gmx_mul_pr(sir_SSE0,sir_SSE0);
767             sir2_SSE1     = gmx_mul_pr(sir_SSE1,sir_SSE1);
768 #ifndef HALF_LJ
769             sir2_SSE2     = gmx_mul_pr(sir_SSE2,sir_SSE2);
770             sir2_SSE3     = gmx_mul_pr(sir_SSE3,sir_SSE3);
771 #endif
772             sir6_SSE0     = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
773             sir6_SSE1     = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
774 #ifdef EXCL_FORCES
775             sir6_SSE0     = gmx_and_pr(sir6_SSE0,int_SSE0);
776             sir6_SSE1     = gmx_and_pr(sir6_SSE1,int_SSE1);
777 #endif
778 #ifndef HALF_LJ
779             sir6_SSE2     = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
780             sir6_SSE3     = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
781 #ifdef EXCL_FORCES
782             sir6_SSE2     = gmx_and_pr(sir6_SSE2,int_SSE2);
783             sir6_SSE3     = gmx_and_pr(sir6_SSE3,int_SSE3);
784 #endif
785 #endif
786 #ifdef VDW_CUTOFF_CHECK
787             sir6_SSE0     = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
788             sir6_SSE1     = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
789 #ifndef HALF_LJ
790             sir6_SSE2     = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
791             sir6_SSE3     = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);
792 #endif
793 #endif
794             FrLJ6_SSE0    = gmx_mul_pr(eps_SSE0,sir6_SSE0);
795             FrLJ6_SSE1    = gmx_mul_pr(eps_SSE1,sir6_SSE1);
796 #ifndef HALF_LJ
797             FrLJ6_SSE2    = gmx_mul_pr(eps_SSE2,sir6_SSE2);
798             FrLJ6_SSE3    = gmx_mul_pr(eps_SSE3,sir6_SSE3);
799 #endif
800             FrLJ12_SSE0   = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
801             FrLJ12_SSE1   = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
802 #ifndef HALF_LJ
803             FrLJ12_SSE2   = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
804             FrLJ12_SSE3   = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);
805 #endif
806 #if defined CALC_ENERGIES
807             /* We need C6 and C12 to calculate the LJ potential shift */
808             sig2_SSE0     = gmx_mul_pr(sig_SSE0,sig_SSE0);
809             sig2_SSE1     = gmx_mul_pr(sig_SSE1,sig_SSE1);
810 #ifndef HALF_LJ
811             sig2_SSE2     = gmx_mul_pr(sig_SSE2,sig_SSE2);
812             sig2_SSE3     = gmx_mul_pr(sig_SSE3,sig_SSE3);
813 #endif
814             sig6_SSE0     = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
815             sig6_SSE1     = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
816 #ifndef HALF_LJ
817             sig6_SSE2     = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
818             sig6_SSE3     = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));
819 #endif
820             c6_SSE0       = gmx_mul_pr(eps_SSE0,sig6_SSE0);
821             c6_SSE1       = gmx_mul_pr(eps_SSE1,sig6_SSE1);
822 #ifndef HALF_LJ
823             c6_SSE2       = gmx_mul_pr(eps_SSE2,sig6_SSE2);
824             c6_SSE3       = gmx_mul_pr(eps_SSE3,sig6_SSE3);
825 #endif
826             c12_SSE0      = gmx_mul_pr(c6_SSE0,sig6_SSE0);
827             c12_SSE1      = gmx_mul_pr(c6_SSE1,sig6_SSE1);
828 #ifndef HALF_LJ
829             c12_SSE2      = gmx_mul_pr(c6_SSE2,sig6_SSE2);
830             c12_SSE3      = gmx_mul_pr(c6_SSE3,sig6_SSE3);
831 #endif
832 #endif
833 #endif /* LJ_COMB_LB */
834
835 #endif /* CALC_LJ */
836             
837 #ifdef CALC_ENERGIES
838 #ifdef ENERGY_GROUPS
839             /* Extract the group pair index per j pair.
840              * Energy groups are stored per i-cluster, so things get
841              * complicated when the i- and j-cluster sizes don't match.
842              */
843             {
844                 int egps_j;
845 #if UNROLLJ == 2
846                 egps_j    = nbat->energrp[cj>>1];
847                 egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
848 #else
849                 /* We assume UNROLLI <= UNROLLJ */
850                 int jdi;
851                 for(jdi=0; jdi<UNROLLJ/UNROLLI; jdi++)
852                 {
853                     int jj;
854                     egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
855                     for(jj=0; jj<(UNROLLI/2); jj++)
856                     {
857                         egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
858                     }
859                 }
860 #endif
861             }
862 #endif
863
864 #ifdef CALC_COULOMB
865 #ifndef ENERGY_GROUPS
866             vctotSSE      = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));
867 #else
868             add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
869             add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
870             add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
871             add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);
872 #endif
873 #endif
874
875 #ifdef CALC_LJ
876             /* Calculate the LJ energies */
877             VLJ6_SSE0     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
878             VLJ6_SSE1     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
879 #ifndef HALF_LJ
880             VLJ6_SSE2     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
881             VLJ6_SSE3     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));
882 #endif
883             VLJ12_SSE0    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
884             VLJ12_SSE1    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
885 #ifndef HALF_LJ
886             VLJ12_SSE2    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
887             VLJ12_SSE3    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));
888 #endif
889
890             VLJ_SSE0      = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
891             VLJ_SSE1      = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
892 #ifndef HALF_LJ
893             VLJ_SSE2      = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
894             VLJ_SSE3      = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);
895 #endif
896             /* The potential shift should be removed for pairs beyond cut-off */
897             VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
898             VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
899 #ifndef HALF_LJ
900             VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
901             VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);
902 #endif
903 #ifdef CHECK_EXCLS
904             /* The potential shift should be removed for excluded pairs */
905             VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,int_SSE0);
906             VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,int_SSE1);
907 #ifndef HALF_LJ
908             VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,int_SSE2);
909             VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,int_SSE3);
910 #endif
911 #endif
912 #ifndef ENERGY_GROUPS
913             VvdwtotSSE    = gmx_add_pr(VvdwtotSSE,
914 #ifndef HALF_LJ
915                                        gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
916 #else
917                                        gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
918 #endif
919                                       );
920 #else
921             add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
922             add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
923 #ifndef HALF_LJ
924             add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
925             add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);
926 #endif
927 #endif
928 #endif /* CALC_LJ */
929 #endif /* CALC_ENERGIES */
930
931 #ifdef CALC_LJ
932             fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,
933 #ifdef CALC_COULOMB
934                                                    gmx_add_pr(frcoul_SSE0,
935 #else
936                                                    (
937 #endif
938                                                     gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
939             fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,
940 #ifdef CALC_COULOMB
941                                                    gmx_add_pr(frcoul_SSE1,
942 #else
943                                                    (
944 #endif
945                                                     gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));
946 #else
947             fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
948             fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);
949 #endif /* CALC_LJ */
950 #if defined CALC_LJ && !defined HALF_LJ
951             fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,
952 #ifdef CALC_COULOMB
953                                                    gmx_add_pr(frcoul_SSE2,
954 #else
955                                                    (
956 #endif
957                                                     gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
958             fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,
959 #ifdef CALC_COULOMB
960                                                    gmx_add_pr(frcoul_SSE3,
961 #else
962                                                    (
963 #endif
964                                                     gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
965 #else
966             /* Atoms 2 and 3 don't have LJ, so we only add Coulomb forces */
967             fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
968             fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
969 #endif
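            /* At this point fscal_SSE* holds F/r: the Coulomb force*r plus,
             * where LJ applies, FrLJ12 - FrLJ6, all multiplied by 1/r^2.
             * Multiplying by dx, dy, dz below gives the Cartesian force
             * components.
             */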
970             
971             /* Calculate temporary vectorial force */
972             tx_SSE0       = gmx_mul_pr(fscal_SSE0,dx_SSE0);
973             tx_SSE1       = gmx_mul_pr(fscal_SSE1,dx_SSE1);
974             tx_SSE2       = gmx_mul_pr(fscal_SSE2,dx_SSE2);
975             tx_SSE3       = gmx_mul_pr(fscal_SSE3,dx_SSE3);
976             ty_SSE0       = gmx_mul_pr(fscal_SSE0,dy_SSE0);
977             ty_SSE1       = gmx_mul_pr(fscal_SSE1,dy_SSE1);
978             ty_SSE2       = gmx_mul_pr(fscal_SSE2,dy_SSE2);
979             ty_SSE3       = gmx_mul_pr(fscal_SSE3,dy_SSE3);
980             tz_SSE0       = gmx_mul_pr(fscal_SSE0,dz_SSE0);
981             tz_SSE1       = gmx_mul_pr(fscal_SSE1,dz_SSE1);
982             tz_SSE2       = gmx_mul_pr(fscal_SSE2,dz_SSE2);
983             tz_SSE3       = gmx_mul_pr(fscal_SSE3,dz_SSE3);
984             
985             /* Increment i atom force */
986             fix_SSE0      = gmx_add_pr(fix_SSE0,tx_SSE0);
987             fix_SSE1      = gmx_add_pr(fix_SSE1,tx_SSE1);
988             fix_SSE2      = gmx_add_pr(fix_SSE2,tx_SSE2);
989             fix_SSE3      = gmx_add_pr(fix_SSE3,tx_SSE3);
990             fiy_SSE0      = gmx_add_pr(fiy_SSE0,ty_SSE0);
991             fiy_SSE1      = gmx_add_pr(fiy_SSE1,ty_SSE1);
992             fiy_SSE2      = gmx_add_pr(fiy_SSE2,ty_SSE2);
993             fiy_SSE3      = gmx_add_pr(fiy_SSE3,ty_SSE3);
994             fiz_SSE0      = gmx_add_pr(fiz_SSE0,tz_SSE0);
995             fiz_SSE1      = gmx_add_pr(fiz_SSE1,tz_SSE1);
996             fiz_SSE2      = gmx_add_pr(fiz_SSE2,tz_SSE2);
997             fiz_SSE3      = gmx_add_pr(fiz_SSE3,tz_SSE3);
998             
999             /* Decrement j atom force */
1000             gmx_store_pr(f+ajx,
1001                          gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
1002             gmx_store_pr(f+ajy,
1003                          gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
1004             gmx_store_pr(f+ajz,
1005                          gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
1006         }
1007
1008 #undef  rinv_ex_SSE0
1009 #undef  rinv_ex_SSE1
1010 #undef  rinv_ex_SSE2
1011 #undef  rinv_ex_SSE3
1012
1013 #undef  wco_vdw_SSE0
1014 #undef  wco_vdw_SSE1
1015 #undef  wco_vdw_SSE2
1016 #undef  wco_vdw_SSE3
1017
1018 #undef  CUTOFF_BLENDV
1019
1020 #undef  EXCL_FORCES