src/gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_x86_simd_inner.h
1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
2  *
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
9  * Copyright (c) 2001-2009, The GROMACS Development Team
10  *
11  * Gromacs is a library for molecular simulation and trajectory analysis,
12  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
13  * a full list of developers and information, check out http://www.gromacs.org
14  *
15  * This program is free software; you can redistribute it and/or modify it under 
16  * the terms of the GNU Lesser General Public License as published by the Free 
17  * Software Foundation; either version 2 of the License, or (at your option) any 
18  * later version.
19  * As a special exception, you may use this file as part of a free software
20  * library without restriction.  Specifically, if other files instantiate
21  * templates or use macros or inline functions from this file, or you compile
22  * this file and link it with other files to produce an executable, this
23  * file does not by itself cause the resulting executable to be covered by
24  * the GNU Lesser General Public License.  
25  *
26  * In plain-speak: do not worry about classes/macros/templates either - only
27  * changes to the library have to be LGPL, not an application linking with it.
28  *
29  * To help fund GROMACS development, we humbly ask that you cite
30  * the papers people have written on it - you can find them on the website!
31  */
32
33 /* This is the innermost loop contents for the n vs n atom
34  * x86 SIMD kernels (SSE2/SSE4.1/AVX, single and double precision).
35  */
36
37
38 /* When calculating RF or Ewald interactions we calculate the electrostatic
39  * forces on excluded atom pairs here in the non-bonded loops.
40  * But when energies and/or the virial are required we calculate them
41  * separately, as it is then easier to separate the energy and virial
42  * contributions.
43  */
44 #if defined CHECK_EXCLS && defined CALC_COULOMB
45 #define EXCL_FORCES
46 #endif
47
48 /* Without exclusions and energies we only need to mask the cut-off;
49  * this can be faster with blendv (only available with SSE4.1 and later).
50  */
51 #if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
52 /* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
53  * With gcc this is slower, except for RF on Sandy Bridge.
54  * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
55  */
56 #if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
57 #define CUTOFF_BLENDV
58 #endif
59 /* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
60  * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
61  * Tested with icc 13.
62  */
63 #if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
64 #define CUTOFF_BLENDV
65 #endif
66 #endif
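/* For reference, the two cut-off masking variants selected here, written in
 * plain SSE single precision (a sketch only; the gmx_*_pr macros used below
 * expand differently depending on SIMD level and precision):
 *
 *   __m128 wco = _mm_cmplt_ps(rsq, rc2);          // cmp: all 1's within cut-off
 *   rinv       = _mm_and_ps(rinv, wco);           // and: zero beyond cut-off
 *
 * versus, with CUTOFF_BLENDV (SSE4.1):
 *
 *   rinv = _mm_blendv_ps(rinv, _mm_setzero_ps(),
 *                        _mm_sub_ps(rc2, rsq));   // sign of rc2-rsq selects zero
 */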
67
68         {
69             int        cj,aj,ajx,ajy,ajz;
70
71 #ifdef ENERGY_GROUPS
72             /* Energy group indices for two atoms packed into one int */
73             int        egp_jj[UNROLLJ/2];
74 #endif
75
76 #ifdef CHECK_EXCLS
77             /* Interaction (non-exclusion) mask of all 1's or 0's */
78             gmx_mm_pr  int_SSE0;
79             gmx_mm_pr  int_SSE1;
80             gmx_mm_pr  int_SSE2;
81             gmx_mm_pr  int_SSE3;
82 #endif
83
84             gmx_mm_pr  jxSSE,jySSE,jzSSE;
85             gmx_mm_pr  dx_SSE0,dy_SSE0,dz_SSE0;
86             gmx_mm_pr  dx_SSE1,dy_SSE1,dz_SSE1;
87             gmx_mm_pr  dx_SSE2,dy_SSE2,dz_SSE2;
88             gmx_mm_pr  dx_SSE3,dy_SSE3,dz_SSE3;
89             gmx_mm_pr  tx_SSE0,ty_SSE0,tz_SSE0;
90             gmx_mm_pr  tx_SSE1,ty_SSE1,tz_SSE1;
91             gmx_mm_pr  tx_SSE2,ty_SSE2,tz_SSE2;
92             gmx_mm_pr  tx_SSE3,ty_SSE3,tz_SSE3;
93             gmx_mm_pr  rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
94             gmx_mm_pr  rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
95             gmx_mm_pr  rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
96             gmx_mm_pr  rsq_SSE3,rinv_SSE3,rinvsq_SSE3;
97 #ifndef CUTOFF_BLENDV
98             /* wco: within cut-off, mask of all 1's or 0's */
99             gmx_mm_pr  wco_SSE0;
100             gmx_mm_pr  wco_SSE1;
101             gmx_mm_pr  wco_SSE2;
102             gmx_mm_pr  wco_SSE3;
103 #endif
104 #ifdef VDW_CUTOFF_CHECK
105             gmx_mm_pr  wco_vdw_SSE0;
106             gmx_mm_pr  wco_vdw_SSE1;
107 #ifndef HALF_LJ
108             gmx_mm_pr  wco_vdw_SSE2;
109             gmx_mm_pr  wco_vdw_SSE3;
110 #endif
111 #endif
112 #ifdef CALC_COULOMB
113 #ifdef CHECK_EXCLS
114             /* 1/r masked with the interaction mask */
115             gmx_mm_pr  rinv_ex_SSE0;
116             gmx_mm_pr  rinv_ex_SSE1;
117             gmx_mm_pr  rinv_ex_SSE2;
118             gmx_mm_pr  rinv_ex_SSE3;
119 #endif
120             gmx_mm_pr  jq_SSE;
121             gmx_mm_pr  qq_SSE0;
122             gmx_mm_pr  qq_SSE1;
123             gmx_mm_pr  qq_SSE2;
124             gmx_mm_pr  qq_SSE3;
125 #ifdef CALC_COUL_TAB
126             /* The force (PME mesh force) we need to subtract from 1/r^2 */
127             gmx_mm_pr  fsub_SSE0;
128             gmx_mm_pr  fsub_SSE1;
129             gmx_mm_pr  fsub_SSE2;
130             gmx_mm_pr  fsub_SSE3;
131 #endif
132 #ifdef CALC_COUL_EWALD
133             gmx_mm_pr  brsq_SSE0,brsq_SSE1,brsq_SSE2,brsq_SSE3;
134             gmx_mm_pr  ewcorr_SSE0,ewcorr_SSE1,ewcorr_SSE2,ewcorr_SSE3;
135 #endif
136
137             /* frcoul = (1/r - fsub)*r */
138             gmx_mm_pr  frcoul_SSE0;
139             gmx_mm_pr  frcoul_SSE1;
140             gmx_mm_pr  frcoul_SSE2;
141             gmx_mm_pr  frcoul_SSE3;
142 #ifdef CALC_COUL_TAB
143             /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
144             gmx_mm_pr  r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
145             gmx_mm_pr  r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
146             gmx_mm_pr  r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
147             gmx_mm_pr  r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
148             /* Table index: rs converted to an int */ 
149 #if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
150             gmx_epi32  ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
151 #else
152             __m128i    ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
153 #endif
154             /* Linear force table values */
155             gmx_mm_pr  ctab0_SSE0,ctab1_SSE0;
156             gmx_mm_pr  ctab0_SSE1,ctab1_SSE1;
157             gmx_mm_pr  ctab0_SSE2,ctab1_SSE2;
158             gmx_mm_pr  ctab0_SSE3,ctab1_SSE3;
159 #ifdef CALC_ENERGIES
160             /* Quadratic energy table value */
161             gmx_mm_pr  ctabv_SSE0;
162             gmx_mm_pr  ctabv_SSE1;
163             gmx_mm_pr  ctabv_SSE2;
164             gmx_mm_pr  ctabv_SSE3;
165 #endif
166 #endif
167 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
168             /* The potential (PME mesh) we need to subtract from 1/r */
169             gmx_mm_pr  vc_sub_SSE0;
170             gmx_mm_pr  vc_sub_SSE1;
171             gmx_mm_pr  vc_sub_SSE2;
172             gmx_mm_pr  vc_sub_SSE3;
173 #endif
174 #ifdef CALC_ENERGIES
175             /* Electrostatic potential */
176             gmx_mm_pr  vcoul_SSE0;
177             gmx_mm_pr  vcoul_SSE1;
178             gmx_mm_pr  vcoul_SSE2;
179             gmx_mm_pr  vcoul_SSE3;
180 #endif
181 #endif
182             /* The force times 1/r */
183             gmx_mm_pr  fscal_SSE0;
184             gmx_mm_pr  fscal_SSE1;
185             gmx_mm_pr  fscal_SSE2;
186             gmx_mm_pr  fscal_SSE3;
187
188 #ifdef CALC_LJ
189 #ifdef LJ_COMB_LB
190             /* LJ sigma_j/2 and sqrt(epsilon_j) */
191             gmx_mm_pr  hsig_j_SSE,seps_j_SSE;
192             /* LJ sigma_ij and epsilon_ij */
193             gmx_mm_pr  sig_SSE0,eps_SSE0;
194             gmx_mm_pr  sig_SSE1,eps_SSE1;
195 #ifndef HALF_LJ
196             gmx_mm_pr  sig_SSE2,eps_SSE2;
197             gmx_mm_pr  sig_SSE3,eps_SSE3;
198 #endif
199 #ifdef CALC_ENERGIES
200             gmx_mm_pr  sig2_SSE0,sig6_SSE0;
201             gmx_mm_pr  sig2_SSE1,sig6_SSE1;
202 #ifndef HALF_LJ
203             gmx_mm_pr  sig2_SSE2,sig6_SSE2;
204             gmx_mm_pr  sig2_SSE3,sig6_SSE3;
205 #endif
206 #endif /* CALC_ENERGIES */
207 #endif /* LJ_COMB_LB */
208
209 #ifdef LJ_COMB_GEOM
210             gmx_mm_pr  c6s_j_SSE,c12s_j_SSE;
211 #endif
212
213 #if defined LJ_COMB_GEOM || defined LJ_COMB_LB
214             /* Index for loading LJ parameters, complicated when interleaving */
215             int         aj2;
216 #endif
217
218 #ifndef FIX_LJ_C
219             /* LJ C6 and C12 parameters, used with geometric comb. rule */
220             gmx_mm_pr  c6_SSE0,c12_SSE0;
221             gmx_mm_pr  c6_SSE1,c12_SSE1;
222 #ifndef HALF_LJ
223             gmx_mm_pr  c6_SSE2,c12_SSE2;
224             gmx_mm_pr  c6_SSE3,c12_SSE3;
225 #endif
226 #endif
227
228             /* Intermediate variables for LJ calculation */
229 #ifndef LJ_COMB_LB
230             gmx_mm_pr  rinvsix_SSE0;
231             gmx_mm_pr  rinvsix_SSE1;
232 #ifndef HALF_LJ
233             gmx_mm_pr  rinvsix_SSE2;
234             gmx_mm_pr  rinvsix_SSE3;
235 #endif
236 #endif
237 #ifdef LJ_COMB_LB
238             gmx_mm_pr  sir_SSE0,sir2_SSE0,sir6_SSE0;
239             gmx_mm_pr  sir_SSE1,sir2_SSE1,sir6_SSE1;
240 #ifndef HALF_LJ
241             gmx_mm_pr  sir_SSE2,sir2_SSE2,sir6_SSE2;
242             gmx_mm_pr  sir_SSE3,sir2_SSE3,sir6_SSE3;
243 #endif
244 #endif
245
246             gmx_mm_pr  FrLJ6_SSE0,FrLJ12_SSE0;
247             gmx_mm_pr  FrLJ6_SSE1,FrLJ12_SSE1;
248 #ifndef HALF_LJ
249             gmx_mm_pr  FrLJ6_SSE2,FrLJ12_SSE2;
250             gmx_mm_pr  FrLJ6_SSE3,FrLJ12_SSE3;
251 #endif
252 #ifdef CALC_ENERGIES
253             gmx_mm_pr  VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
254             gmx_mm_pr  VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
255 #ifndef HALF_LJ
256             gmx_mm_pr  VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
257             gmx_mm_pr  VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;
258 #endif
259 #endif
260 #endif /* CALC_LJ */
261
262             /* j-cluster index */
263             cj            = l_cj[cjind].cj;
264
265             /* Atom indices (of the first atom in the cluster) */
266             aj            = cj*UNROLLJ;
267 #if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
268 #if UNROLLJ == STRIDE
269             aj2           = aj*2;
270 #else
271             aj2           = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
272 #endif
273 #endif
274 #if UNROLLJ == STRIDE
275             ajx           = aj*DIM;
276 #else
277             ajx           = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
278 #endif
279             ajy           = ajx + STRIDE;
280             ajz           = ajy + STRIDE;
281
282 #ifdef CHECK_EXCLS
283 #ifndef GMX_MM256_HERE
284             {
285                 /* Load integer interaction mask */
286                 __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);
287
288                 /* There is no not-equal SSE integer comparison, so we need a not here */
289                 int_SSE0  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
290                 int_SSE1  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
291                 int_SSE2  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
292                 int_SSE3  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
293             }
294 #else
295             {
296 #ifndef GMX_DOUBLE
297                 /* Load integer interaction mask */
298                 /* With AVX there are no integer operations, so cast to real */
299                 gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
300                 /* mask0/mask1 only cover 2*UNROLLJ of the 4*UNROLLJ exclusion bits: use a shifted copy of the mask for i=2,3 */
301                 gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
302                 /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
303                  * With gcc we don't need the cast, but it's faster.
304                  */
305 #define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
306                 int_SSE0  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
307                 int_SSE1  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
308                 int_SSE2  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
309                 int_SSE3  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
310 #undef cast_cvt
311 #else
312                 /* Load integer interaction mask */
313                 /* With AVX there are no integer operations,
314                  * and there is no int to double conversion, so cast to float
315                  */
316                 __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
317 #define cast_cvt(x)  _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
318                 int_SSE0  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
319                 int_SSE1  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
320                 int_SSE2  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
321                 int_SSE3  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
322 #undef cast_cvt
323 #endif
324             }
325 #endif
326 #endif
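            /* In scalar terms, the mask loading above (when CHECK_EXCLS is
             * defined) expands the packed exclusion bits of this i/j cluster
             * pair into full-width SIMD masks. A sketch, with the bit layout
             * assumed to be bit i*UNROLLJ+j set when pair (i,j) interacts:
             *
             *   for (i = 0; i < UNROLLI; i++)
             *       for (j = 0; j < UNROLLJ; j++)
             *           int_i[j] = ((l_cj[cjind].excl >> (i*UNROLLJ + j)) & 1) ?
             *                      ~0 : 0;
             */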
327             /* load j atom coordinates */
328             jxSSE         = gmx_load_pr(x+ajx);
329             jySSE         = gmx_load_pr(x+ajy);
330             jzSSE         = gmx_load_pr(x+ajz);
331             
332             /* Calculate distance */
333             dx_SSE0       = gmx_sub_pr(ix_SSE0,jxSSE);
334             dy_SSE0       = gmx_sub_pr(iy_SSE0,jySSE);
335             dz_SSE0       = gmx_sub_pr(iz_SSE0,jzSSE);
336             dx_SSE1       = gmx_sub_pr(ix_SSE1,jxSSE);
337             dy_SSE1       = gmx_sub_pr(iy_SSE1,jySSE);
338             dz_SSE1       = gmx_sub_pr(iz_SSE1,jzSSE);
339             dx_SSE2       = gmx_sub_pr(ix_SSE2,jxSSE);
340             dy_SSE2       = gmx_sub_pr(iy_SSE2,jySSE);
341             dz_SSE2       = gmx_sub_pr(iz_SSE2,jzSSE);
342             dx_SSE3       = gmx_sub_pr(ix_SSE3,jxSSE);
343             dy_SSE3       = gmx_sub_pr(iy_SSE3,jySSE);
344             dz_SSE3       = gmx_sub_pr(iz_SSE3,jzSSE);
345             
346             /* rsq = dx*dx+dy*dy+dz*dz */
347             rsq_SSE0      = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
348             rsq_SSE1      = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
349             rsq_SSE2      = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
350             rsq_SSE3      = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);
351
352 #ifndef CUTOFF_BLENDV
353             wco_SSE0      = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
354             wco_SSE1      = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
355             wco_SSE2      = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
356             wco_SSE3      = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);
357 #endif
358
359 #ifdef CHECK_EXCLS
360 #ifdef EXCL_FORCES
361             /* Only remove the (sub-)diagonal to avoid double counting */
362 #if UNROLLJ == UNROLLI
363             if (cj == ci_sh)
364             {
365                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag_SSE0);
366                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag_SSE1);
367                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag_SSE2);
368                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag_SSE3);
369             }
370 #else
371 #if UNROLLJ < UNROLLI
372             if (cj == ci_sh*2)
373             {
374                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
375                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
376                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
377                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
378             }
379             if (cj == ci_sh*2 + 1)
380             { 
381                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
382                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
383                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
384                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
385             }
386 #else
387             if (cj*2 == ci_sh)
388             {
389                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
390                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
391                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
392                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
393             }
394             else if (cj*2 + 1 == ci_sh)
395             {
396                 wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
397                 wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
398                 wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
399                 wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
400             }
401 #endif
402 #endif
403 #else /* EXCL_FORCES */
404             /* Remove all excluded atom pairs from the list */
405             wco_SSE0      = gmx_and_pr(wco_SSE0,int_SSE0);
406             wco_SSE1      = gmx_and_pr(wco_SSE1,int_SSE1);
407             wco_SSE2      = gmx_and_pr(wco_SSE2,int_SSE2);
408             wco_SSE3      = gmx_and_pr(wco_SSE3,int_SSE3);
409 #endif
410 #endif
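            /* For a cluster paired with itself the diag* masks above are
             * assumed to keep only pairs with j > i, so that neither the
             * i==j self term nor both orderings of an i,j pair enter the
             * RF/Ewald exclusion correction.
             */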
411
412 #ifdef COUNT_PAIRS
413             {
414                 int i,j;
415                 real tmp[UNROLLJ];
416                 for(i=0; i<UNROLLI; i++)
417                 {
418                     gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
419                     for(j=0; j<UNROLLJ; j++)
420                     {
421                         if (!(tmp[j] == 0))
422                         {
423                             npair++;
424                         }
425                     }
426                 }
427             }
428 #endif
429
430 #ifdef CHECK_EXCLS
431             /* For excluded pairs add a small number to avoid r^-6 = NaN */
432             rsq_SSE0      = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
433             rsq_SSE1      = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
434             rsq_SSE2      = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
435             rsq_SSE3      = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
436 #endif
437
438             /* Calculate 1/r */
439 #ifndef GMX_DOUBLE
440             rinv_SSE0     = gmx_invsqrt_pr(rsq_SSE0);
441             rinv_SSE1     = gmx_invsqrt_pr(rsq_SSE1);
442             rinv_SSE2     = gmx_invsqrt_pr(rsq_SSE2);
443             rinv_SSE3     = gmx_invsqrt_pr(rsq_SSE3);
444 #else
445             GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
446             GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);
447 #endif
448
449 #ifdef CALC_COULOMB
450             /* Load parameters for j atom */
451             jq_SSE        = gmx_load_pr(q+aj);
452             qq_SSE0       = gmx_mul_pr(iq_SSE0,jq_SSE);
453             qq_SSE1       = gmx_mul_pr(iq_SSE1,jq_SSE);
454             qq_SSE2       = gmx_mul_pr(iq_SSE2,jq_SSE);
455             qq_SSE3       = gmx_mul_pr(iq_SSE3,jq_SSE);
456 #endif
457
458 #ifdef CALC_LJ
459
460 #if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
461             load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
462             load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
463 #ifndef HALF_LJ
464             load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
465             load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
466 #endif
467 #endif /* not defined any LJ rule */
468
469 #ifdef LJ_COMB_GEOM
470             c6s_j_SSE     = gmx_load_pr(ljc+aj2+0);
471             c12s_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
472             c6_SSE0       = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
473             c6_SSE1       = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
474 #ifndef HALF_LJ
475             c6_SSE2       = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
476             c6_SSE3       = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
477 #endif
478             c12_SSE0      = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
479             c12_SSE1      = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
480 #ifndef HALF_LJ
481             c12_SSE2      = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
482             c12_SSE3      = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
483 #endif
484 #endif /* LJ_COMB_GEOM */
485
486 #ifdef LJ_COMB_LB
487             hsig_j_SSE    = gmx_load_pr(ljc+aj2+0);
488             seps_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
489
490             sig_SSE0      = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
491             sig_SSE1      = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
492             eps_SSE0      = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
493             eps_SSE1      = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
494 #ifndef HALF_LJ
495             sig_SSE2      = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
496             sig_SSE3      = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
497             eps_SSE2      = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
498             eps_SSE3      = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
499 #endif
500 #endif /* LJ_COMB_LB */
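            /* Lorentz-Berthelot combination in the half/sqrt form used here:
             * with hsig = sigma/2 and seps = sqrt(epsilon) stored per atom,
             *
             *   sig_ij = hsig_i + hsig_j = (sigma_i + sigma_j)/2
             *   eps_ij = seps_i * seps_j = sqrt(epsilon_i * epsilon_j)
             *
             * which avoids a sqrt and a 0.5 multiply in this inner loop.
             */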
501
502 #endif /* CALC_LJ */
503
504 #ifndef CUTOFF_BLENDV
505             rinv_SSE0     = gmx_and_pr(rinv_SSE0,wco_SSE0);
506             rinv_SSE1     = gmx_and_pr(rinv_SSE1,wco_SSE1);
507             rinv_SSE2     = gmx_and_pr(rinv_SSE2,wco_SSE2);
508             rinv_SSE3     = gmx_and_pr(rinv_SSE3,wco_SSE3);
509 #else
510             /* We only need to mask for the cut-off: blendv is faster */
511             rinv_SSE0     = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
512             rinv_SSE1     = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
513             rinv_SSE2     = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
514             rinv_SSE3     = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
515 #endif
516
517             rinvsq_SSE0   = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
518             rinvsq_SSE1   = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
519             rinvsq_SSE2   = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
520             rinvsq_SSE3   = gmx_mul_pr(rinv_SSE3,rinv_SSE3);
521
522 #ifdef CALC_COULOMB
523             /* Note that here we calculate force*r, not the usual force/r.
524              * This avoids having to mask the reaction-field contribution,
525              * as frcoul is later multiplied by rinvsq, which has already been
526              * masked with the cut-off check.
527              */
528
529 #ifdef EXCL_FORCES
530             /* Only add 1/r for non-excluded atom pairs */
531             rinv_ex_SSE0  = gmx_and_pr(rinv_SSE0,int_SSE0);
532             rinv_ex_SSE1  = gmx_and_pr(rinv_SSE1,int_SSE1);
533             rinv_ex_SSE2  = gmx_and_pr(rinv_SSE2,int_SSE2);
534             rinv_ex_SSE3  = gmx_and_pr(rinv_SSE3,int_SSE3);
535 #else
536             /* No exclusion forces, we always need 1/r */
537 #define     rinv_ex_SSE0    rinv_SSE0
538 #define     rinv_ex_SSE1    rinv_SSE1
539 #define     rinv_ex_SSE2    rinv_SSE2
540 #define     rinv_ex_SSE3    rinv_SSE3
541 #endif
542
543 #ifdef CALC_COUL_RF
544             /* Electrostatic interactions */
545             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
546             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
547             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
548             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));
549
550 #ifdef CALC_ENERGIES
551             vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
552             vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
553             vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
554             vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));
555 #endif
556 #endif
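            /* For reference, the reaction-field expressions evaluated above,
             * with mrc_3, hrc_3 and moh_rc assumed to hold -2*k_rf, k_rf and
             * -c_rf, precomputed outside this loop:
             *
             *   F(r)*r = qq*(1/r - 2*k_rf*r^2)
             *   V(r)   = qq*(1/r +   k_rf*r^2 - c_rf)
             */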
557
558 #ifdef CALC_COUL_EWALD
559             /* We need to mask (or limit) rsq for the cut-off,
560              * as large distances can cause an overflow in gmx_pmecorrF/V.
561              */
562 #ifndef CUTOFF_BLENDV
563             brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE0,wco_SSE0));
564             brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE1,wco_SSE1));
565             brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE2,wco_SSE2));
566             brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE3,wco_SSE3));
567 #else
568             /* Strangely, putting mul on a separate line is slower (icc 13) */
569             brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0)));
570             brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1)));
571             brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2)));
572             brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3)));
573 #endif
574             ewcorr_SSE0   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0),beta_SSE);
575             ewcorr_SSE1   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1),beta_SSE);
576             ewcorr_SSE2   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2),beta_SSE);
577             ewcorr_SSE3   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3),beta_SSE);
578             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(ewcorr_SSE0,brsq_SSE0)));
579             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(ewcorr_SSE1,brsq_SSE1)));
580             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(ewcorr_SSE2,brsq_SSE2)));
581             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(ewcorr_SSE3,brsq_SSE3)));
582
583 #ifdef CALC_ENERGIES
584             vc_sub_SSE0   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0),beta_SSE);
585             vc_sub_SSE1   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1),beta_SSE);
586             vc_sub_SSE2   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2),beta_SSE);
587             vc_sub_SSE3   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3),beta_SSE);
588 #endif
589
590 #endif /* CALC_COUL_EWALD */
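            /* For reference: for a non-excluded pair the sum 1/r + ewcorr*brsq
             * evaluated above corresponds to the real-space Ewald force,
             *
             *   F(r)*r = qq*( erfc(beta*r)/r + 2*beta/sqrt(pi)*exp(-(beta*r)^2) )
             *
             * while for an excluded pair only the mesh correction (the part
             * beyond 1/r) remains. The exact convention of the gmx_pmecorrF/V
             * helpers is assumed here.
             */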
591
592 #ifdef CALC_COUL_TAB
593             /* Electrostatic interactions */
594             r_SSE0        = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
595             r_SSE1        = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
596             r_SSE2        = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
597             r_SSE3        = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
598             /* Convert r to scaled table units */
599             rs_SSE0       = gmx_mul_pr(r_SSE0,invtsp_SSE);
600             rs_SSE1       = gmx_mul_pr(r_SSE1,invtsp_SSE);
601             rs_SSE2       = gmx_mul_pr(r_SSE2,invtsp_SSE);
602             rs_SSE3       = gmx_mul_pr(r_SSE3,invtsp_SSE);
603             /* Truncate scaled r to an int */
604             ti_SSE0       = gmx_cvttpr_epi32(rs_SSE0);
605             ti_SSE1       = gmx_cvttpr_epi32(rs_SSE1);
606             ti_SSE2       = gmx_cvttpr_epi32(rs_SSE2);
607             ti_SSE3       = gmx_cvttpr_epi32(rs_SSE3);
608 #ifdef GMX_X86_SSE4_1
609             /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
610             rf_SSE0       = gmx_floor_pr(rs_SSE0);
611             rf_SSE1       = gmx_floor_pr(rs_SSE1);
612             rf_SSE2       = gmx_floor_pr(rs_SSE2);
613             rf_SSE3       = gmx_floor_pr(rs_SSE3);
614 #else
615             rf_SSE0       = gmx_cvtepi32_pr(ti_SSE0);
616             rf_SSE1       = gmx_cvtepi32_pr(ti_SSE1);
617             rf_SSE2       = gmx_cvtepi32_pr(ti_SSE2);
618             rf_SSE3       = gmx_cvtepi32_pr(ti_SSE3);
619 #endif
620             frac_SSE0     = gmx_sub_pr(rs_SSE0,rf_SSE0);
621             frac_SSE1     = gmx_sub_pr(rs_SSE1,rf_SSE1);
622             frac_SSE2     = gmx_sub_pr(rs_SSE2,rf_SSE2);
623             frac_SSE3     = gmx_sub_pr(rs_SSE3,rf_SSE3);
624
625             /* Load and interpolate table forces and possibly energies.
626              * Force and energy can be combined in one table, stride 4: FDV0
627              * or in two separate tables with stride 1: F and V.
628              * Currently single precision uses FDV0, double precision F and V.
629              */
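            /* A scalar sketch of the table lookup done below, assuming each
             * table point i stores the force F[i], the difference
             * D[i] = F[i+1]-F[i] and (for energies) the potential V[i],
             * with invtsp = 1/spacing and mhalfsp = -spacing/2:
             *
             *   rs   = r*invtsp;
             *   i    = (int)rs;
             *   frac = rs - i;
             *   fsub = F[i] + frac*D[i];                  // linear force
             *   vsub = V[i] + mhalfsp*frac*(F[i] + fsub); // quadratic energy
             */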
630 #ifndef CALC_ENERGIES
631             load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
632             load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
633             load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
634             load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);
635 #else
636 #ifdef TAB_FDV0
637             load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
638             load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
639             load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
640             load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
641 #else
642             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
643             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
644             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
645             load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
646 #endif
647 #endif
648             fsub_SSE0     = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
649             fsub_SSE1     = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
650             fsub_SSE2     = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
651             fsub_SSE3     = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
652             frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
653             frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
654             frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
655             frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));
656
657 #ifdef CALC_ENERGIES
658             vc_sub_SSE0   = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
659             vc_sub_SSE1   = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
660             vc_sub_SSE2   = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
661             vc_sub_SSE3   = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));
662 #endif
663 #endif /* CALC_COUL_TAB */
664
665 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
666 #ifndef NO_SHIFT_EWALD
667             /* Add Ewald potential shift to vc_sub for convenience */
668 #ifdef CHECK_EXCLS
669             vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
670             vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
671             vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
672             vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));
673 #else
674             vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
675             vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
676             vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
677             vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);
678 #endif
679 #endif
680             
681             vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
682             vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
683             vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
684             vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));
685
686 #endif
687
688 #ifdef CALC_ENERGIES
689             /* Mask energy for cut-off and diagonal */
690             vcoul_SSE0    = gmx_and_pr(vcoul_SSE0,wco_SSE0);
691             vcoul_SSE1    = gmx_and_pr(vcoul_SSE1,wco_SSE1);
692             vcoul_SSE2    = gmx_and_pr(vcoul_SSE2,wco_SSE2);
693             vcoul_SSE3    = gmx_and_pr(vcoul_SSE3,wco_SSE3);
694 #endif
695
696 #endif /* CALC_COULOMB */
697
698 #ifdef CALC_LJ
699             /* Lennard-Jones interaction */
700
701 #ifdef VDW_CUTOFF_CHECK
702             wco_vdw_SSE0  = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
703             wco_vdw_SSE1  = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
704 #ifndef HALF_LJ
705             wco_vdw_SSE2  = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
706             wco_vdw_SSE3  = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);
707 #endif
708 #else
709             /* Same cut-off for Coulomb and VdW, reuse the registers */
710 #define     wco_vdw_SSE0    wco_SSE0
711 #define     wco_vdw_SSE1    wco_SSE1
712 #define     wco_vdw_SSE2    wco_SSE2
713 #define     wco_vdw_SSE3    wco_SSE3
714 #endif
715
716 #ifndef LJ_COMB_LB
717             rinvsix_SSE0  = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
718             rinvsix_SSE1  = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
719 #ifdef EXCL_FORCES
720             rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,int_SSE0);
721             rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,int_SSE1);
722 #endif
723 #ifndef HALF_LJ
724             rinvsix_SSE2  = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
725             rinvsix_SSE3  = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
726 #ifdef EXCL_FORCES
727             rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,int_SSE2);
728             rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,int_SSE3);
729 #endif
730 #endif
731 #ifdef VDW_CUTOFF_CHECK
732             rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
733             rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
734 #ifndef HALF_LJ
735             rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
736             rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);
737 #endif
738 #endif
739             FrLJ6_SSE0    = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
740             FrLJ6_SSE1    = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
741 #ifndef HALF_LJ
742             FrLJ6_SSE2    = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
743             FrLJ6_SSE3    = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);
744 #endif
745             FrLJ12_SSE0   = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
746             FrLJ12_SSE1   = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
747 #ifndef HALF_LJ
748             FrLJ12_SSE2   = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
749             FrLJ12_SSE3   = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
750 #endif
751 #endif /* not LJ_COMB_LB */
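            /* In scalar form the LJ force calculated above is, assuming the
             * stored c6/c12 include the usual factors 6 and 12 (which is why
             * 1/6 and 1/12 reappear in the energies below):
             *
             *   FrLJ6  = c6  / r^6;
             *   FrLJ12 = c12 / r^12;
             *   F(r)*r = FrLJ12 - FrLJ6;
             */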
752
753 #ifdef LJ_COMB_LB
754             sir_SSE0      = gmx_mul_pr(sig_SSE0,rinv_SSE0);
755             sir_SSE1      = gmx_mul_pr(sig_SSE1,rinv_SSE1);
756 #ifndef HALF_LJ
757             sir_SSE2      = gmx_mul_pr(sig_SSE2,rinv_SSE2);
758             sir_SSE3      = gmx_mul_pr(sig_SSE3,rinv_SSE3);
759 #endif
760             sir2_SSE0     = gmx_mul_pr(sir_SSE0,sir_SSE0);
761             sir2_SSE1     = gmx_mul_pr(sir_SSE1,sir_SSE1);
762 #ifndef HALF_LJ
763             sir2_SSE2     = gmx_mul_pr(sir_SSE2,sir_SSE2);
764             sir2_SSE3     = gmx_mul_pr(sir_SSE3,sir_SSE3);
765 #endif
766             sir6_SSE0     = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
767             sir6_SSE1     = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
768 #ifdef EXCL_FORCES
769             sir6_SSE0     = gmx_and_pr(sir6_SSE0,int_SSE0);
770             sir6_SSE1     = gmx_and_pr(sir6_SSE1,int_SSE1);
771 #endif
772 #ifndef HALF_LJ
773             sir6_SSE2     = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
774             sir6_SSE3     = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
775 #ifdef EXCL_FORCES
776             sir6_SSE2     = gmx_and_pr(sir6_SSE2,int_SSE2);
777             sir6_SSE3     = gmx_and_pr(sir6_SSE3,int_SSE3);
778 #endif
779 #endif
780 #ifdef VDW_CUTOFF_CHECK
781             sir6_SSE0     = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
782             sir6_SSE1     = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
783 #ifndef HALF_LJ
784             sir6_SSE2     = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
785             sir6_SSE3     = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);
786 #endif
787 #endif
788             FrLJ6_SSE0    = gmx_mul_pr(eps_SSE0,sir6_SSE0);
789             FrLJ6_SSE1    = gmx_mul_pr(eps_SSE1,sir6_SSE1);
790 #ifndef HALF_LJ
791             FrLJ6_SSE2    = gmx_mul_pr(eps_SSE2,sir6_SSE2);
792             FrLJ6_SSE3    = gmx_mul_pr(eps_SSE3,sir6_SSE3);
793 #endif
794             FrLJ12_SSE0   = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
795             FrLJ12_SSE1   = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
796 #ifndef HALF_LJ
797             FrLJ12_SSE2   = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
798             FrLJ12_SSE3   = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);
799 #endif
800 #if defined CALC_ENERGIES
801             /* We need C6 and C12 to calculate the LJ potential shift */
802             sig2_SSE0     = gmx_mul_pr(sig_SSE0,sig_SSE0);
803             sig2_SSE1     = gmx_mul_pr(sig_SSE1,sig_SSE1);
804 #ifndef HALF_LJ
805             sig2_SSE2     = gmx_mul_pr(sig_SSE2,sig_SSE2);
806             sig2_SSE3     = gmx_mul_pr(sig_SSE3,sig_SSE3);
807 #endif
808             sig6_SSE0     = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
809             sig6_SSE1     = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
810 #ifndef HALF_LJ
811             sig6_SSE2     = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
812             sig6_SSE3     = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));
813 #endif
814             c6_SSE0       = gmx_mul_pr(eps_SSE0,sig6_SSE0);
815             c6_SSE1       = gmx_mul_pr(eps_SSE1,sig6_SSE1);
816 #ifndef HALF_LJ
817             c6_SSE2       = gmx_mul_pr(eps_SSE2,sig6_SSE2);
818             c6_SSE3       = gmx_mul_pr(eps_SSE3,sig6_SSE3);
819 #endif
820             c12_SSE0      = gmx_mul_pr(c6_SSE0,sig6_SSE0);
821             c12_SSE1      = gmx_mul_pr(c6_SSE1,sig6_SSE1);
822 #ifndef HALF_LJ
823             c12_SSE2      = gmx_mul_pr(c6_SSE2,sig6_SSE2);
824             c12_SSE3      = gmx_mul_pr(c6_SSE3,sig6_SSE3);
825 #endif
826 #endif
827 #endif /* LJ_COMB_LB */
828
829 #endif /* CALC_LJ */
830             
831 #ifdef CALC_ENERGIES
832 #ifdef ENERGY_GROUPS
833             /* Extract the group pair index per j pair.
834              * Energy groups are stored per i-cluster, so things get
835              * complicated when the i- and j-cluster sizes don't match.
836              */
837             {
838                 int egps_j;
839 #if UNROLLJ == 2
840                 egps_j    = nbat->energrp[cj>>1];
841                 egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
842 #else
843                 /* We assume UNROLLI <= UNROLLJ */
844                 int jdi;
845                 for(jdi=0; jdi<UNROLLJ/UNROLLI; jdi++)
846                 {
847                     int jj;
848                     egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
849                     for(jj=0; jj<(UNROLLI/2); jj++)
850                     {
851                         egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
852                     }
853                 }
854 #endif
855             }
856 #endif
857
858 #ifdef CALC_COULOMB
859 #ifndef ENERGY_GROUPS
860             vctotSSE      = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));
861 #else
862             add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
863             add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
864             add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
865             add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);
866 #endif
867 #endif
868
869 #ifdef CALC_LJ
870             /* Calculate the LJ energies */
871             VLJ6_SSE0     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
872             VLJ6_SSE1     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
873 #ifndef HALF_LJ
874             VLJ6_SSE2     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
875             VLJ6_SSE3     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));
876 #endif
877             VLJ12_SSE0    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
878             VLJ12_SSE1    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
879 #ifndef HALF_LJ
880             VLJ12_SSE2    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
881             VLJ12_SSE3    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));
882 #endif
883
884             VLJ_SSE0      = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
885             VLJ_SSE1      = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
886 #ifndef HALF_LJ
887             VLJ_SSE2      = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
888             VLJ_SSE3      = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);
889 #endif
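            /* In scalar form, with sh_invrc6 = rc^-6 and sh_invrc12 = rc^-12
             * (assumed), the LJ energy accumulated here is
             *
             *   VLJ = (FrLJ12 - c12*rc^-12)/12 - (FrLJ6 - c6*rc^-6)/6
             *
             * i.e. the plain LJ potential shifted to zero at the cut-off.
             */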
890             /* The potential shift should be removed for pairs beyond cut-off */
891             VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
892             VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
893 #ifndef HALF_LJ
894             VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
895             VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);
896 #endif
897 #ifdef CHECK_EXCLS
898             /* The potential shift should be removed for excluded pairs */
899             VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,int_SSE0);
900             VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,int_SSE1);
901 #ifndef HALF_LJ
902             VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,int_SSE2);
903             VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,int_SSE3);
904 #endif
905 #endif
906 #ifndef ENERGY_GROUPS
907             VvdwtotSSE    = gmx_add_pr(VvdwtotSSE,
908 #ifndef HALF_LJ
909                                        gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
910 #else
911                                        gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
912 #endif
913                                       );
914 #else
915             add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
916             add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
917 #ifndef HALF_LJ
918             add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
919             add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);
920 #endif
921 #endif
922 #endif /* CALC_LJ */
923 #endif /* CALC_ENERGIES */
924
925 #ifdef CALC_LJ
926             fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,
927 #ifdef CALC_COULOMB
928                                                    gmx_add_pr(frcoul_SSE0,
929 #else
930                                                    (
931 #endif
932                                                     gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
933             fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,
934 #ifdef CALC_COULOMB
935                                                    gmx_add_pr(frcoul_SSE1,
936 #else
937                                                    (
938 #endif
939                                                     gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));
940 #else
941             fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
942             fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);
943 #endif /* CALC_LJ */
944 #if defined CALC_LJ && !defined HALF_LJ
945             fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,
946 #ifdef CALC_COULOMB
947                                                    gmx_add_pr(frcoul_SSE2,
948 #else
949                                                    (
950 #endif
951                                                     gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
952             fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,
953 #ifdef CALC_COULOMB
954                                                    gmx_add_pr(frcoul_SSE3,
955 #else
956                                                    (
957 #endif
958                                                     gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
959 #else
960             /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
961             fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
962             fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
963 #endif
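            /* fscal holds F(r)/r, so the force on each i atom is
             * fscal*(x_i - x_j); the four i contributions are accumulated
             * below and the same sums are subtracted from the j atom forces
             * (Newton's third law).
             */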
964             
965             /* Calculate temporary vectorial force */
966             tx_SSE0       = gmx_mul_pr(fscal_SSE0,dx_SSE0);
967             tx_SSE1       = gmx_mul_pr(fscal_SSE1,dx_SSE1);
968             tx_SSE2       = gmx_mul_pr(fscal_SSE2,dx_SSE2);
969             tx_SSE3       = gmx_mul_pr(fscal_SSE3,dx_SSE3);
970             ty_SSE0       = gmx_mul_pr(fscal_SSE0,dy_SSE0);
971             ty_SSE1       = gmx_mul_pr(fscal_SSE1,dy_SSE1);
972             ty_SSE2       = gmx_mul_pr(fscal_SSE2,dy_SSE2);
973             ty_SSE3       = gmx_mul_pr(fscal_SSE3,dy_SSE3);
974             tz_SSE0       = gmx_mul_pr(fscal_SSE0,dz_SSE0);
975             tz_SSE1       = gmx_mul_pr(fscal_SSE1,dz_SSE1);
976             tz_SSE2       = gmx_mul_pr(fscal_SSE2,dz_SSE2);
977             tz_SSE3       = gmx_mul_pr(fscal_SSE3,dz_SSE3);
978             
979             /* Increment i atom force */
980             fix_SSE0      = gmx_add_pr(fix_SSE0,tx_SSE0);
981             fix_SSE1      = gmx_add_pr(fix_SSE1,tx_SSE1);
982             fix_SSE2      = gmx_add_pr(fix_SSE2,tx_SSE2);
983             fix_SSE3      = gmx_add_pr(fix_SSE3,tx_SSE3);
984             fiy_SSE0      = gmx_add_pr(fiy_SSE0,ty_SSE0);
985             fiy_SSE1      = gmx_add_pr(fiy_SSE1,ty_SSE1);
986             fiy_SSE2      = gmx_add_pr(fiy_SSE2,ty_SSE2);
987             fiy_SSE3      = gmx_add_pr(fiy_SSE3,ty_SSE3);
988             fiz_SSE0      = gmx_add_pr(fiz_SSE0,tz_SSE0);
989             fiz_SSE1      = gmx_add_pr(fiz_SSE1,tz_SSE1);
990             fiz_SSE2      = gmx_add_pr(fiz_SSE2,tz_SSE2);
991             fiz_SSE3      = gmx_add_pr(fiz_SSE3,tz_SSE3);
992             
993             /* Decrement j atom force */
994             gmx_store_pr(f+ajx,
995                          gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
996             gmx_store_pr(f+ajy,
997                          gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
998             gmx_store_pr(f+ajz,
999                          gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
1000         }
1001
1002 #undef  rinv_ex_SSE0
1003 #undef  rinv_ex_SSE1
1004 #undef  rinv_ex_SSE2
1005 #undef  rinv_ex_SSE3
1006
1007 #undef  wco_vdw_SSE0
1008 #undef  wco_vdw_SSE1
1009 #undef  wco_vdw_SSE2
1010 #undef  wco_vdw_SSE3
1011
1012 #undef  CUTOFF_BLENDV
1013
1014 #undef  EXCL_FORCES