/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This source code is part of GROMACS.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 *
 * GROMACS is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU Lesser General Public License.
 *
 * In plain-speak: do not worry about classes/macros/templates either - only
 * changes to the library have to be LGPL, not an application linking with it.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website.
 */
/* This is the innermost loop contents for the n vs n atom
 * SSE2 single precision kernels.
 */

/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS && !(defined __GNUC__ && (defined CALC_COUL_TAB || (defined CALC_COUL_RF && defined GMX_MM128_HERE)))
/* Without exclusions and energies we only need to mask the cut-off,
 * which is faster with blendv (only available with SSE4.1 and later).
 * With gcc and PME or RF in 128-bit, blendv is slower;
 * tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#define CUTOFF_BLENDV
#endif
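/* Illustrative sketch, not compiled into the kernel: the two cut-off
 * masking strategies chosen between above, written out for one SSE
 * register of single-precision pairs. All names here are hypothetical.
 */
#if 0
#include <smmintrin.h> /* SSE4.1, for _mm_blendv_ps */

static __m128 mask_cutoff_cmp(__m128 rinv, __m128 rsq, __m128 rc2)
{
    /* Generic path: a compare yields an all-ones/all-zeros lane mask,
     * which is ANDed into 1/r. Works on plain SSE2. */
    __m128 wco = _mm_cmplt_ps(rsq, rc2);

    return _mm_and_ps(rinv, wco);
}

static __m128 mask_cutoff_blendv(__m128 rinv, __m128 rsq, __m128 rc2)
{
    /* CUTOFF_BLENDV path: blendv selects per lane on the sign bit of
     * rc2 - rsq, so lanes beyond the cut-off get 1/r replaced by zero
     * without a separate compare instruction. */
    return _mm_blendv_ps(rinv, _mm_setzero_ps(), _mm_sub_ps(rc2, rsq));
}
#endif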
    int cj,aj,ajx,ajy,ajz;

#ifdef ENERGY_GROUPS
    int egps_j;
    int egp_jj[UNROLLJ>>1],jj;
#endif

#ifdef CHECK_EXCLS
    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pr int_SSE0;
    gmx_mm_pr int_SSE1;
    gmx_mm_pr int_SSE2;
    gmx_mm_pr int_SSE3;
#endif
    gmx_mm_pr jxSSE,jySSE,jzSSE;
    gmx_mm_pr dx_SSE0,dy_SSE0,dz_SSE0;
    gmx_mm_pr dx_SSE1,dy_SSE1,dz_SSE1;
    gmx_mm_pr dx_SSE2,dy_SSE2,dz_SSE2;
    gmx_mm_pr dx_SSE3,dy_SSE3,dz_SSE3;
    gmx_mm_pr tx_SSE0,ty_SSE0,tz_SSE0;
    gmx_mm_pr tx_SSE1,ty_SSE1,tz_SSE1;
    gmx_mm_pr tx_SSE2,ty_SSE2,tz_SSE2;
    gmx_mm_pr tx_SSE3,ty_SSE3,tz_SSE3;
    gmx_mm_pr rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
    gmx_mm_pr rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
    gmx_mm_pr rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
    gmx_mm_pr rsq_SSE3,rinv_SSE3,rinvsq_SSE3;
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pr wco_SSE0;
    gmx_mm_pr wco_SSE1;
    gmx_mm_pr wco_SSE2;
    gmx_mm_pr wco_SSE3;
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr wco_vdw_SSE0;
    gmx_mm_pr wco_vdw_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr wco_vdw_SSE2;
    gmx_mm_pr wco_vdw_SSE3;
#endif
#endif
    /* 1/r masked with the interaction mask */
    gmx_mm_pr rinv_ex_SSE0;
    gmx_mm_pr rinv_ex_SSE1;
    gmx_mm_pr rinv_ex_SSE2;
    gmx_mm_pr rinv_ex_SSE3;

    /* j-atom charge and charge products q_i*q_j */
    gmx_mm_pr jq_SSE;
    gmx_mm_pr qq_SSE0;
    gmx_mm_pr qq_SSE1;
    gmx_mm_pr qq_SSE2;
    gmx_mm_pr qq_SSE3;
#ifdef CALC_COUL_TAB
    /* The force (PME mesh force) we need to subtract from 1/r^2 */
    gmx_mm_pr fsub_SSE0;
    gmx_mm_pr fsub_SSE1;
    gmx_mm_pr fsub_SSE2;
    gmx_mm_pr fsub_SSE3;
#endif
    /* frcoul = (1/r - fsub)*r */
    gmx_mm_pr frcoul_SSE0;
    gmx_mm_pr frcoul_SSE1;
    gmx_mm_pr frcoul_SSE2;
    gmx_mm_pr frcoul_SSE3;

#ifdef CALC_COUL_TAB
    /* For tables: r, rs=r/sp (sp is the table spacing), rf=floor(rs), frac=rs-rf */
    gmx_mm_pr r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
    gmx_mm_pr r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
    gmx_mm_pr r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
    gmx_mm_pr r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
    /* Table index: rs converted to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
    gmx_epi32 ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#else
    __m128i ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#endif
    /* Linear force table values */
    gmx_mm_pr ctab0_SSE0,ctab1_SSE0;
    gmx_mm_pr ctab0_SSE1,ctab1_SSE1;
    gmx_mm_pr ctab0_SSE2,ctab1_SSE2;
    gmx_mm_pr ctab0_SSE3,ctab1_SSE3;

    /* Quadratic energy table value */
    gmx_mm_pr ctabv_SSE0;
    gmx_mm_pr ctabv_SSE1;
    gmx_mm_pr ctabv_SSE2;
    gmx_mm_pr ctabv_SSE3;
    /* The potential (PME mesh) we need to subtract from 1/r */
    gmx_mm_pr vc_sub_SSE0;
    gmx_mm_pr vc_sub_SSE1;
    gmx_mm_pr vc_sub_SSE2;
    gmx_mm_pr vc_sub_SSE3;
#endif /* CALC_COUL_TAB */
#ifdef CALC_ENERGIES
    /* Electrostatic potential */
    gmx_mm_pr vcoul_SSE0;
    gmx_mm_pr vcoul_SSE1;
    gmx_mm_pr vcoul_SSE2;
    gmx_mm_pr vcoul_SSE3;
#endif
    /* The force times 1/r */
    gmx_mm_pr fscal_SSE0;
    gmx_mm_pr fscal_SSE1;
    gmx_mm_pr fscal_SSE2;
    gmx_mm_pr fscal_SSE3;
#ifdef CALC_LJ
#ifdef LJ_COMB_LB
    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr hsig_j_SSE,seps_j_SSE;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr sig_SSE0,eps_SSE0;
    gmx_mm_pr sig_SSE1,eps_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr sig_SSE2,eps_SSE2;
    gmx_mm_pr sig_SSE3,eps_SSE3;
#endif
    /* sigma^2 and sigma^6, needed for the LJ potential shift */
    gmx_mm_pr sig2_SSE0,sig6_SSE0;
    gmx_mm_pr sig2_SSE1,sig6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr sig2_SSE2,sig6_SSE2;
    gmx_mm_pr sig2_SSE3,sig6_SSE3;
#endif
#endif /* LJ_COMB_LB */
#ifdef LJ_COMB_GEOM
    /* Geometric combination rule j parameters: sqrt(C6_j) and sqrt(C12_j) */
    gmx_mm_pr c6s_j_SSE,c12s_j_SSE;
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */
    int aj2;
#endif

    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr c6_SSE0,c12_SSE0;
    gmx_mm_pr c6_SSE1,c12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr c6_SSE2,c12_SSE2;
    gmx_mm_pr c6_SSE3,c12_SSE3;
#endif
    /* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
    gmx_mm_pr rinvsix_SSE0;
    gmx_mm_pr rinvsix_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr rinvsix_SSE2;
    gmx_mm_pr rinvsix_SSE3;
#endif
#else
    gmx_mm_pr sir_SSE0,sir2_SSE0,sir6_SSE0;
    gmx_mm_pr sir_SSE1,sir2_SSE1,sir6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr sir_SSE2,sir2_SSE2,sir6_SSE2;
    gmx_mm_pr sir_SSE3,sir2_SSE3,sir6_SSE3;
#endif
#endif

    gmx_mm_pr FrLJ6_SSE0,FrLJ12_SSE0;
    gmx_mm_pr FrLJ6_SSE1,FrLJ12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr FrLJ6_SSE2,FrLJ12_SSE2;
    gmx_mm_pr FrLJ6_SSE3,FrLJ12_SSE3;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
    gmx_mm_pr VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
    gmx_mm_pr VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;
#endif
#endif
#endif /* CALC_LJ */
    /* j-cluster index */
    cj = l_cj[cjind].cj;

    /* Atom indices (of the first atom in the cluster) */
    aj = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2 = aj*2;
#else
    aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
    ajx = aj*DIM;
#else
    ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
    ajy = ajx + STRIDE;
    ajz = ajy + STRIDE;
#ifdef CHECK_EXCLS
#ifndef GMX_MM256_HERE
    {
        /* Load integer interaction mask */
        __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);

        /* There is no not-equal compare instruction in SSE,
         * so we test "bit set" with an and-not and a compare against zero.
         */
        int_SSE0 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
        int_SSE1 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
        int_SSE2 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
        int_SSE3 = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
    }
#else
#ifndef GMX_DOUBLE
    {
        /* Load integer interaction mask.
         * With AVX there are no integer operations, so cast to real.
         */
        gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
        /* We can't compare all 4*8=32 float bits: shift the mask */
        gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
        /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
         * With gcc we don't need the cast, but it's faster.
         */
#define cast_cvt(x) _mm256_cvtepi32_ps(_mm256_castps_si256(x))
        int_SSE0 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
        int_SSE1 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
        int_SSE2 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
        int_SSE3 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
#undef cast_cvt
    }
#else
    {
        /* Load integer interaction mask.
         * With AVX there are no integer operations,
         * and there is no int to double conversion, so cast to float.
         */
        __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
#define cast_cvt(x) _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
        int_SSE0 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
        int_SSE1 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
        int_SSE2 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
        int_SSE3 = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
#undef cast_cvt
    }
#endif
#endif
#endif /* CHECK_EXCLS */
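/* Illustrative sketch, not compiled: what the and-not/compare trick above
 * computes per lane. We want "is this pair's exclusion bit set?", i.e. an
 * (excl & bit) != 0 test, but SSE2 only has an integer equality compare,
 * so it is rewritten as (~excl & bit) == 0. Names are hypothetical.
 */
#if 0
#include <emmintrin.h> /* SSE2 */

static __m128i interaction_mask(__m128i excl_bits, __m128i lane_bit)
{
    /* all-ones where the lane's bit IS set in excl_bits (pair interacts),
     * all-zeros where it is not set (pair is excluded) */
    return _mm_cmpeq_epi32(_mm_andnot_si128(excl_bits, lane_bit),
                           _mm_setzero_si128());
}
#endif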
    /* load j atom coordinates */
    jxSSE = gmx_load_pr(x+ajx);
    jySSE = gmx_load_pr(x+ajy);
    jzSSE = gmx_load_pr(x+ajz);

    /* Calculate distance */
    dx_SSE0 = gmx_sub_pr(ix_SSE0,jxSSE);
    dy_SSE0 = gmx_sub_pr(iy_SSE0,jySSE);
    dz_SSE0 = gmx_sub_pr(iz_SSE0,jzSSE);
    dx_SSE1 = gmx_sub_pr(ix_SSE1,jxSSE);
    dy_SSE1 = gmx_sub_pr(iy_SSE1,jySSE);
    dz_SSE1 = gmx_sub_pr(iz_SSE1,jzSSE);
    dx_SSE2 = gmx_sub_pr(ix_SSE2,jxSSE);
    dy_SSE2 = gmx_sub_pr(iy_SSE2,jySSE);
    dz_SSE2 = gmx_sub_pr(iz_SSE2,jzSSE);
    dx_SSE3 = gmx_sub_pr(ix_SSE3,jxSSE);
    dy_SSE3 = gmx_sub_pr(iy_SSE3,jySSE);
    dz_SSE3 = gmx_sub_pr(iz_SSE3,jzSSE);

    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_SSE0 = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
    rsq_SSE1 = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
    rsq_SSE2 = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
    rsq_SSE3 = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);
#ifndef CUTOFF_BLENDV
    wco_SSE0 = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
    wco_SSE1 = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
    wco_SSE2 = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
    wco_SSE3 = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);

#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    if (cj == ci_sh)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0,diag_SSE0);
        wco_SSE1 = gmx_and_pr(wco_SSE1,diag_SSE1);
        wco_SSE2 = gmx_and_pr(wco_SSE2,diag_SSE2);
        wco_SSE3 = gmx_and_pr(wco_SSE3,diag_SSE3);
    }
#else
#if UNROLLJ < UNROLLI
    if (cj == ci_sh*2)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0,diag0_SSE0);
        wco_SSE1 = gmx_and_pr(wco_SSE1,diag0_SSE1);
        wco_SSE2 = gmx_and_pr(wco_SSE2,diag0_SSE2);
        wco_SSE3 = gmx_and_pr(wco_SSE3,diag0_SSE3);
    }
    if (cj == ci_sh*2 + 1)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0,diag1_SSE0);
        wco_SSE1 = gmx_and_pr(wco_SSE1,diag1_SSE1);
        wco_SSE2 = gmx_and_pr(wco_SSE2,diag1_SSE2);
        wco_SSE3 = gmx_and_pr(wco_SSE3,diag1_SSE3);
    }
#else
    if (cj*2 == ci_sh)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0,diag0_SSE0);
        wco_SSE1 = gmx_and_pr(wco_SSE1,diag0_SSE1);
        wco_SSE2 = gmx_and_pr(wco_SSE2,diag0_SSE2);
        wco_SSE3 = gmx_and_pr(wco_SSE3,diag0_SSE3);
    }
    else if (cj*2 + 1 == ci_sh)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0,diag1_SSE0);
        wco_SSE1 = gmx_and_pr(wco_SSE1,diag1_SSE1);
        wco_SSE2 = gmx_and_pr(wco_SSE2,diag1_SSE2);
        wco_SSE3 = gmx_and_pr(wco_SSE3,diag1_SSE3);
    }
#endif
#endif
#else /* EXCL_FORCES */
    /* Remove all excluded atom pairs from the list */
    wco_SSE0 = gmx_and_pr(wco_SSE0,int_SSE0);
    wco_SSE1 = gmx_and_pr(wco_SSE1,int_SSE1);
    wco_SSE2 = gmx_and_pr(wco_SSE2,int_SSE2);
    wco_SSE3 = gmx_and_pr(wco_SSE3,int_SSE3);
#endif
#endif
#endif /* CUTOFF_BLENDV */

#ifdef COUNT_PAIRS
    {
        int  i,j;
        real tmp[UNROLLJ];

        for(i=0; i<UNROLLI; i++)
        {
            /* Store the cut-off mask and count the pairs in range */
            gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
            for(j=0; j<UNROLLJ; j++)
            {
                if (!(tmp[j] == 0))
                {
                    npair++;
                }
            }
        }
    }
#endif
#ifdef CHECK_EXCLS
    /* For excluded pairs add a small number to avoid r^-6 = NaN;
     * andnot applies the constant only where the interaction mask is 0.
     */
    rsq_SSE0 = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
    rsq_SSE1 = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
    rsq_SSE2 = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
    rsq_SSE3 = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
#endif
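/* Illustrative sketch, not compiled: the branchless trick above. A compare
 * mask is all-ones or all-zeros per lane, so andnot(mask, constant) yields
 * the constant only on excluded lanes and a single add applies it
 * selectively. Hypothetical names.
 */
#if 0
#include <emmintrin.h> /* SSE2 */

static __m128 add_where_excluded(__m128 rsq, __m128 int_mask, __m128 tiny)
{
    /* rsq + tiny on excluded lanes (int_mask all-zeros), rsq elsewhere */
    return _mm_add_ps(rsq, _mm_andnot_ps(int_mask, tiny));
}
#endif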
#ifndef GMX_DOUBLE
    rinv_SSE0 = gmx_invsqrt_pr(rsq_SSE0);
    rinv_SSE1 = gmx_invsqrt_pr(rsq_SSE1);
    rinv_SSE2 = gmx_invsqrt_pr(rsq_SSE2);
    rinv_SSE3 = gmx_invsqrt_pr(rsq_SSE3);
#else
    /* In double precision two 1/sqrt's are done together */
    GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
    GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);
#endif
    /* Load parameters for j atom */
#ifdef CALC_COULOMB
    jq_SSE  = gmx_load_pr(q+aj);
    qq_SSE0 = gmx_mul_pr(iq_SSE0,jq_SSE);
    qq_SSE1 = gmx_mul_pr(iq_SSE1,jq_SSE);
    qq_SSE2 = gmx_mul_pr(iq_SSE2,jq_SSE);
    qq_SSE3 = gmx_mul_pr(iq_SSE3,jq_SSE);
#endif

#ifdef CALC_LJ
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
    load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
#ifndef HALF_LJ
    load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
    load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
#endif
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
    c6s_j_SSE  = gmx_load_pr(ljc+aj2+0);
    c12s_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);
    c6_SSE0    = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
    c6_SSE1    = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
#ifndef HALF_LJ
    c6_SSE2    = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
    c6_SSE3    = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
#endif
    c12_SSE0   = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
    c12_SSE1   = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
#ifndef HALF_LJ
    c12_SSE2   = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
    c12_SSE3   = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
#endif
#endif /* LJ_COMB_GEOM */
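/* Illustrative sketch, not compiled: the geometric combination rule used
 * above. The per-atom parameters are stored pre-square-rooted
 * (c6s = sqrt(C6), c12s = sqrt(C12)), so the pair rule
 * C6_ij = sqrt(C6_i*C6_j) reduces to one multiply in the inner loop.
 * Hypothetical scalar names.
 */
#if 0
static void combine_geometric(double c6s_i, double c12s_i,
                              double c6s_j, double c12s_j,
                              double *c6_ij, double *c12_ij)
{
    *c6_ij  = c6s_i*c6s_j;   /* = sqrt(C6_i*C6_j)   */
    *c12_ij = c12s_i*c12s_j; /* = sqrt(C12_i*C12_j) */
}
#endif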
#ifdef LJ_COMB_LB
    hsig_j_SSE = gmx_load_pr(ljc+aj2+0);
    seps_j_SSE = gmx_load_pr(ljc+aj2+STRIDE);

    sig_SSE0 = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
    sig_SSE1 = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
    eps_SSE0 = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
    eps_SSE1 = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
#ifndef HALF_LJ
    sig_SSE2 = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
    sig_SSE3 = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
    eps_SSE2 = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
    eps_SSE3 = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
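/* Illustrative sketch, not compiled: the Lorentz-Berthelot combination
 * above. Storing sigma/2 and sqrt(epsilon) per atom makes the pair rules
 * sigma_ij = (sigma_i+sigma_j)/2 and eps_ij = sqrt(eps_i*eps_j) a single
 * add and a single multiply per pair. Hypothetical scalar names.
 */
#if 0
static void combine_lorentz_berthelot(double hsig_i, double seps_i,
                                      double hsig_j, double seps_j,
                                      double *sig_ij, double *eps_ij)
{
    *sig_ij = hsig_i + hsig_j; /* = (sigma_i + sigma_j)/2 */
    *eps_ij = seps_i*seps_j;   /* = sqrt(eps_i*eps_j)     */
}
#endif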
#ifndef CUTOFF_BLENDV
    rinv_SSE0 = gmx_and_pr(rinv_SSE0,wco_SSE0);
    rinv_SSE1 = gmx_and_pr(rinv_SSE1,wco_SSE1);
    rinv_SSE2 = gmx_and_pr(rinv_SSE2,wco_SSE2);
    rinv_SSE3 = gmx_and_pr(rinv_SSE3,wco_SSE3);
#else
    /* We only need to mask for the cut-off: blendv is faster */
    rinv_SSE0 = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
    rinv_SSE1 = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
    rinv_SSE2 = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
    rinv_SSE3 = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
#endif

    rinvsq_SSE0 = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
    rinvsq_SSE1 = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
    rinvsq_SSE2 = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
    rinvsq_SSE3 = gmx_mul_pr(rinv_SSE3,rinv_SSE3);
#ifdef CALC_COULOMB
    /* Note that here we calculate force*r, not the usual force/r.
     * This allows avoiding masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq, which has been
     * masked with the cut-off check.
     */

#ifdef EXCL_FORCES
    /* Only add 1/r for non-excluded atom pairs */
    rinv_ex_SSE0 = gmx_and_pr(rinv_SSE0,int_SSE0);
    rinv_ex_SSE1 = gmx_and_pr(rinv_SSE1,int_SSE1);
    rinv_ex_SSE2 = gmx_and_pr(rinv_SSE2,int_SSE2);
    rinv_ex_SSE3 = gmx_and_pr(rinv_SSE3,int_SSE3);
#else
    /* No exclusion forces: we always need 1/r */
#define rinv_ex_SSE0 rinv_SSE0
#define rinv_ex_SSE1 rinv_SSE1
#define rinv_ex_SSE2 rinv_SSE2
#define rinv_ex_SSE3 rinv_SSE3
#endif
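/* Illustrative sketch, not compiled: the force*r convention noted above.
 * The kernel accumulates F*r per pair and converts to F/r with a single
 * multiply by the cut-off-masked 1/r^2, so every term inside frcoul gets
 * the cut-off applied in that same step. Hypothetical scalar names.
 */
#if 0
static double force_over_r(double frcoul, double rinvsq_masked)
{
    /* fscal = (F*r)*(1/r^2) = F/r; rinvsq_masked is zero beyond the
     * cut-off, which zeroes the whole pair force in one operation */
    return frcoul*rinvsq_masked;
}
#endif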
#ifdef CALC_COUL_RF
    /* Electrostatic interactions: reaction field */
    frcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
    frcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
    frcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
    frcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));

#ifdef CALC_ENERGIES
    vcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));
#endif
#endif /* CALC_COUL_RF */
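/* Illustrative sketch, not compiled: the reaction-field expressions above
 * in scalar form. The interpretation of the constants is an assumption,
 * not something this file states: for tin-foil boundary conditions
 * (epsilon_rf = infinity) the usual RF constants reduce to
 * mrc_3 = -1/rc^3, hrc_3 = 1/(2*rc^3) and moh_rc = -3/(2*rc).
 * The force is accumulated as F*r, matching the kernel's convention.
 * Hypothetical scalar names.
 */
#if 0
static void rf_scalar(double qq, double r, double rsq, double rc,
                      double *fr, double *v)
{
    double rinv   = 1.0/r;
    double mrc_3  = -1.0/(rc*rc*rc); /* force constant          */
    double hrc_3  = 0.5/(rc*rc*rc);  /* energy constant         */
    double moh_rc = -1.5/rc;         /* potential shift         */

    *fr = qq*(rinv + rsq*mrc_3);          /* F*r                */
    *v  = qq*(rinv + rsq*hrc_3 + moh_rc); /* V, zero at r = rc  */
}
#endif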
#ifdef CALC_COUL_TAB
    /* Electrostatic interactions: tabulated */
    r_SSE0 = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
    r_SSE1 = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
    r_SSE2 = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
    r_SSE3 = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
    /* Convert r to scaled table units */
    rs_SSE0 = gmx_mul_pr(r_SSE0,invtsp_SSE);
    rs_SSE1 = gmx_mul_pr(r_SSE1,invtsp_SSE);
    rs_SSE2 = gmx_mul_pr(r_SSE2,invtsp_SSE);
    rs_SSE3 = gmx_mul_pr(r_SSE3,invtsp_SSE);
    /* Truncate scaled r to an int */
    ti_SSE0 = gmx_cvttpr_epi32(rs_SSE0);
    ti_SSE1 = gmx_cvttpr_epi32(rs_SSE1);
    ti_SSE2 = gmx_cvttpr_epi32(rs_SSE2);
    ti_SSE3 = gmx_cvttpr_epi32(rs_SSE3);
#ifdef GMX_X86_SSE4_1
    /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
    rf_SSE0 = gmx_floor_pr(rs_SSE0);
    rf_SSE1 = gmx_floor_pr(rs_SSE1);
    rf_SSE2 = gmx_floor_pr(rs_SSE2);
    rf_SSE3 = gmx_floor_pr(rs_SSE3);
#else
    rf_SSE0 = gmx_cvtepi32_pr(ti_SSE0);
    rf_SSE1 = gmx_cvtepi32_pr(ti_SSE1);
    rf_SSE2 = gmx_cvtepi32_pr(ti_SSE2);
    rf_SSE3 = gmx_cvtepi32_pr(ti_SSE3);
#endif
    frac_SSE0 = gmx_sub_pr(rs_SSE0,rf_SSE0);
    frac_SSE1 = gmx_sub_pr(rs_SSE1,rf_SSE1);
    frac_SSE2 = gmx_sub_pr(rs_SSE2,rf_SSE2);
    frac_SSE3 = gmx_sub_pr(rs_SSE3,rf_SSE3);
    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0,
     * or kept in two separate tables with stride 1: F and V.
     * Currently single precision uses FDV0, double precision uses F and V.
     */
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
    load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
    load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
    load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);
#else
#ifdef TAB_FDV0
    load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
    load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
    load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
    load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#else
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#endif
#endif
    /* Linear interpolation of the force table: fsub = F + D*frac */
    fsub_SSE0 = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
    fsub_SSE1 = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
    fsub_SSE2 = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
    fsub_SSE3 = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
    /* frcoul = qq*(1/r - fsub*r), i.e. force*r */
    frcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
    frcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
    frcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
    frcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));

#ifdef CALC_ENERGIES
    /* Quadratic interpolation of the energy, consistent with the force */
    vc_sub_SSE0 = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
    vc_sub_SSE1 = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
    vc_sub_SSE2 = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
    vc_sub_SSE3 = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));
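/* Illustrative sketch, not compiled: the table interpolation above in
 * scalar form, assuming mhalfsp is minus half the table spacing. The force
 * table stores the force F and its per-point delta D; the force is
 * interpolated linearly and the energy correction follows by integrating
 * that same line, giving a term quadratic in frac. Hypothetical names;
 * the FDV0 and F+V layouts differ only in how F, D and V are stored.
 */
#if 0
#include <math.h>

static void table_interp(const double *F, const double *D, const double *V,
                         double r, double invsp, double sp,
                         double *fsub, double *vsub)
{
    double rs   = r*invsp;   /* r in table units        */
    double rf   = floor(rs);
    double frac = rs - rf;   /* fraction within the bin */
    int    ti   = (int)rs;   /* table index             */

    /* linear force interpolation */
    *fsub = F[ti] + frac*D[ti];
    /* quadratic energy, consistent with the integrated force */
    *vsub = V[ti] - 0.5*sp*frac*(F[ti] + *fsub);
}
#endif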
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
    vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
    vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
    vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
    vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));
#else
    vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
    vc_sub_SSE1 = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
    vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
    vc_sub_SSE3 = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);
#endif
#endif /* NO_SHIFT_EWALD */
    vcoul_SSE0 = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
    vcoul_SSE1 = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
    vcoul_SSE2 = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
    vcoul_SSE3 = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));
#endif
#endif /* CALC_COUL_TAB */

#ifdef CALC_ENERGIES
    /* Mask energy for cut-off and diagonal */
    vcoul_SSE0 = gmx_and_pr(vcoul_SSE0,wco_SSE0);
    vcoul_SSE1 = gmx_and_pr(vcoul_SSE1,wco_SSE1);
    vcoul_SSE2 = gmx_and_pr(vcoul_SSE2,wco_SSE2);
    vcoul_SSE3 = gmx_and_pr(vcoul_SSE3,wco_SSE3);
#endif
#endif /* CALC_COULOMB */
#ifdef CALC_LJ
    /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
    /* Separate VdW cut-off check */
    wco_vdw_SSE0 = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
    wco_vdw_SSE1 = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
#ifndef HALF_LJ
    wco_vdw_SSE2 = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
    wco_vdw_SSE3 = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);
#endif
#else
    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_SSE0 wco_SSE0
#define wco_vdw_SSE1 wco_SSE1
#define wco_vdw_SSE2 wco_SSE2
#define wco_vdw_SSE3 wco_SSE3
#endif
#ifndef LJ_COMB_LB
    rinvsix_SSE0 = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
    rinvsix_SSE1 = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
#ifdef EXCL_FORCES
    rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0,int_SSE0);
    rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
    rinvsix_SSE2 = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
    rinvsix_SSE3 = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
#ifdef EXCL_FORCES
    rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2,int_SSE2);
    rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
    rinvsix_SSE1 = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
    rinvsix_SSE3 = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0 = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
    FrLJ6_SSE1 = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2 = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
    FrLJ6_SSE3 = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);
#endif
    FrLJ12_SSE0 = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
    FrLJ12_SSE1 = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
#ifndef HALF_LJ
    FrLJ12_SSE2 = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
    FrLJ12_SSE3 = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
#endif
#endif /* not LJ_COMB_LB */
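/* Illustrative sketch, not compiled: the split LJ force terms above in
 * scalar form. FrLJ6 and FrLJ12 are the r^-6 and r^-12 force*r
 * contributions; for FrLJ12 - FrLJ6 to equal F*r = 12*C12*r^-12 - 6*C6*r^-6,
 * the stored c6/c12 must carry the factors 6 and 12. That premultiplication
 * is an assumption about the parameter layout, not something this file
 * shows. Hypothetical names.
 */
#if 0
static double lj_force_times_r(double c6_x6, double c12_x12, double rinvsq)
{
    double rinvsix = rinvsq*rinvsq*rinvsq;
    double FrLJ6   = c6_x6*rinvsix;           /* 6*C6*r^-6    */
    double FrLJ12  = c12_x12*rinvsix*rinvsix; /* 12*C12*r^-12 */

    return FrLJ12 - FrLJ6;                    /* F*r          */
}
#endif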
#ifdef LJ_COMB_LB
    sir_SSE0 = gmx_mul_pr(sig_SSE0,rinv_SSE0);
    sir_SSE1 = gmx_mul_pr(sig_SSE1,rinv_SSE1);
#ifndef HALF_LJ
    sir_SSE2 = gmx_mul_pr(sig_SSE2,rinv_SSE2);
    sir_SSE3 = gmx_mul_pr(sig_SSE3,rinv_SSE3);
#endif
    sir2_SSE0 = gmx_mul_pr(sir_SSE0,sir_SSE0);
    sir2_SSE1 = gmx_mul_pr(sir_SSE1,sir_SSE1);
#ifndef HALF_LJ
    sir2_SSE2 = gmx_mul_pr(sir_SSE2,sir_SSE2);
    sir2_SSE3 = gmx_mul_pr(sir_SSE3,sir_SSE3);
#endif
    sir6_SSE0 = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
    sir6_SSE1 = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
#ifdef EXCL_FORCES
    sir6_SSE0 = gmx_and_pr(sir6_SSE0,int_SSE0);
    sir6_SSE1 = gmx_and_pr(sir6_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
    sir6_SSE2 = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
    sir6_SSE3 = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
#ifdef EXCL_FORCES
    sir6_SSE2 = gmx_and_pr(sir6_SSE2,int_SSE2);
    sir6_SSE3 = gmx_and_pr(sir6_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    sir6_SSE0 = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
    sir6_SSE1 = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    sir6_SSE2 = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
    sir6_SSE3 = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0 = gmx_mul_pr(eps_SSE0,sir6_SSE0);
    FrLJ6_SSE1 = gmx_mul_pr(eps_SSE1,sir6_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2 = gmx_mul_pr(eps_SSE2,sir6_SSE2);
    FrLJ6_SSE3 = gmx_mul_pr(eps_SSE3,sir6_SSE3);
#endif
    FrLJ12_SSE0 = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
    FrLJ12_SSE1 = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
#ifndef HALF_LJ
    FrLJ12_SSE2 = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
    FrLJ12_SSE3 = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);
#endif
#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_SSE0 = gmx_mul_pr(sig_SSE0,sig_SSE0);
    sig2_SSE1 = gmx_mul_pr(sig_SSE1,sig_SSE1);
#ifndef HALF_LJ
    sig2_SSE2 = gmx_mul_pr(sig_SSE2,sig_SSE2);
    sig2_SSE3 = gmx_mul_pr(sig_SSE3,sig_SSE3);
#endif
    sig6_SSE0 = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
    sig6_SSE1 = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
#ifndef HALF_LJ
    sig6_SSE2 = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
    sig6_SSE3 = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));
#endif
    c6_SSE0 = gmx_mul_pr(eps_SSE0,sig6_SSE0);
    c6_SSE1 = gmx_mul_pr(eps_SSE1,sig6_SSE1);
#ifndef HALF_LJ
    c6_SSE2 = gmx_mul_pr(eps_SSE2,sig6_SSE2);
    c6_SSE3 = gmx_mul_pr(eps_SSE3,sig6_SSE3);
#endif
    c12_SSE0 = gmx_mul_pr(c6_SSE0,sig6_SSE0);
    c12_SSE1 = gmx_mul_pr(c6_SSE1,sig6_SSE1);
#ifndef HALF_LJ
    c12_SSE2 = gmx_mul_pr(c6_SSE2,sig6_SSE2);
    c12_SSE3 = gmx_mul_pr(c6_SSE3,sig6_SSE3);
#endif
#endif /* CALC_ENERGIES */
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
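/* Illustrative sketch, not compiled: with Lorentz-Berthelot parameters the
 * kernel works in sigma/epsilon form, so the C6/C12 needed for the
 * potential shift are reconstructed as c6 = eps*sig^6 and c12 = c6*sig^6.
 * Any 4*epsilon-style prefactors are assumed to be folded into the stored
 * eps. Hypothetical names.
 */
#if 0
static void lj_c6_c12_from_sig_eps(double sig, double eps,
                                   double *c6, double *c12)
{
    double sig2 = sig*sig;
    double sig6 = sig2*sig2*sig2;

    *c6  = eps*sig6;
    *c12 = *c6*sig6;
}
#endif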
#ifdef CALC_ENERGIES
#ifdef ENERGY_GROUPS
    /* Extract the group pair index per j pair */
#if UNROLLJ == 2
    egps_j    = nbat->energrp[cj>>1];
    egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
    egps_j = nbat->energrp[cj];
    for(jj=0; jj<(UNROLLJ>>1); jj++)
    {
        egp_jj[jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
    }
#endif
#endif

#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
    vctotSSE = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));
#else
    add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
    add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
    add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
    add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);
#endif
#endif
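/* Illustrative sketch, not compiled: decoding the packed per-atom energy
 * group indices used above. Several group ids are packed into one integer;
 * each is extracted with a shift and a mask, then scaled to a buffer
 * stride. The field width and packing order are assumptions here.
 */
#if 0
static int egp_index(unsigned int packed, int slot,
                     int shift, unsigned int mask, int stride)
{
    /* group id of atom 'slot', scaled for indexing the energy buffers */
    return (int)((packed >> (slot*shift)) & mask)*stride;
}
#endif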
#ifdef CALC_LJ
    /* Calculate the LJ energies, shifted to zero at the cut-off */
    VLJ6_SSE0 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
    VLJ6_SSE1 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
#ifndef HALF_LJ
    VLJ6_SSE2 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
    VLJ6_SSE3 = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));
#endif
    VLJ12_SSE0 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
    VLJ12_SSE1 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
#ifndef HALF_LJ
    VLJ12_SSE2 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
    VLJ12_SSE3 = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));
#endif

    VLJ_SSE0 = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
    VLJ_SSE1 = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
    VLJ_SSE3 = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);
#endif
    /* The potential shift should be removed for pairs beyond cut-off */
    VLJ_SSE0 = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
    VLJ_SSE1 = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
    VLJ_SSE3 = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);
#endif
#ifdef CHECK_EXCLS
    /* The potential shift should be removed for excluded pairs */
    VLJ_SSE0 = gmx_and_pr(VLJ_SSE0,int_SSE0);
    VLJ_SSE1 = gmx_and_pr(VLJ_SSE1,int_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_and_pr(VLJ_SSE2,int_SSE2);
    VLJ_SSE3 = gmx_and_pr(VLJ_SSE3,int_SSE3);
#endif
#endif
#ifndef ENERGY_GROUPS
    VvdwtotSSE = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                            gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
#else
                            gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
#endif
                           );
#else
    add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
    add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
#ifndef HALF_LJ
    add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
    add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
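/* Illustrative sketch, not compiled: the shifted LJ energies above in
 * scalar form. Reusing the force*r terms, V6 = (FrLJ6 - c6/rc^6)/6 and
 * V12 = (FrLJ12 - c12/rc^12)/12, so the potential is zero at the cut-off;
 * the shift is then masked away again for out-of-range and excluded pairs.
 * Assumes the 6/12-premultiplied c6/c12 convention noted earlier.
 * Hypothetical names.
 */
#if 0
static double lj_energy_shifted(double FrLJ6, double FrLJ12,
                                double c6_x6, double c12_x12,
                                double invrc6, double invrc12)
{
    double VLJ6  = (FrLJ6  - c6_x6*invrc6)/6.0;
    double VLJ12 = (FrLJ12 - c12_x12*invrc12)/12.0;

    return VLJ12 - VLJ6;
}
#endif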
#ifdef CALC_LJ
    fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE0,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
    fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE1,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));
#else
    fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
    fscal_SSE1 = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
    fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE2,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
    fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE3,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
#else
    /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
    fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
    fscal_SSE3 = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
#endif
    /* Calculate temporary vectorial force */
    tx_SSE0 = gmx_mul_pr(fscal_SSE0,dx_SSE0);
    tx_SSE1 = gmx_mul_pr(fscal_SSE1,dx_SSE1);
    tx_SSE2 = gmx_mul_pr(fscal_SSE2,dx_SSE2);
    tx_SSE3 = gmx_mul_pr(fscal_SSE3,dx_SSE3);
    ty_SSE0 = gmx_mul_pr(fscal_SSE0,dy_SSE0);
    ty_SSE1 = gmx_mul_pr(fscal_SSE1,dy_SSE1);
    ty_SSE2 = gmx_mul_pr(fscal_SSE2,dy_SSE2);
    ty_SSE3 = gmx_mul_pr(fscal_SSE3,dy_SSE3);
    tz_SSE0 = gmx_mul_pr(fscal_SSE0,dz_SSE0);
    tz_SSE1 = gmx_mul_pr(fscal_SSE1,dz_SSE1);
    tz_SSE2 = gmx_mul_pr(fscal_SSE2,dz_SSE2);
    tz_SSE3 = gmx_mul_pr(fscal_SSE3,dz_SSE3);
    /* Increment i atom force */
    fix_SSE0 = gmx_add_pr(fix_SSE0,tx_SSE0);
    fix_SSE1 = gmx_add_pr(fix_SSE1,tx_SSE1);
    fix_SSE2 = gmx_add_pr(fix_SSE2,tx_SSE2);
    fix_SSE3 = gmx_add_pr(fix_SSE3,tx_SSE3);
    fiy_SSE0 = gmx_add_pr(fiy_SSE0,ty_SSE0);
    fiy_SSE1 = gmx_add_pr(fiy_SSE1,ty_SSE1);
    fiy_SSE2 = gmx_add_pr(fiy_SSE2,ty_SSE2);
    fiy_SSE3 = gmx_add_pr(fiy_SSE3,ty_SSE3);
    fiz_SSE0 = gmx_add_pr(fiz_SSE0,tz_SSE0);
    fiz_SSE1 = gmx_add_pr(fiz_SSE1,tz_SSE1);
    fiz_SSE2 = gmx_add_pr(fiz_SSE2,tz_SSE2);
    fiz_SSE3 = gmx_add_pr(fiz_SSE3,tz_SSE3);
    /* Decrement j atom force */
    gmx_store_pr(f+ajx,
                 gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
    gmx_store_pr(f+ajy,
                 gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
    gmx_store_pr(f+ajz,
                 gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
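/* Illustrative sketch, not compiled: the Newton's-third-law update above.
 * Each i atom accumulated +f_ij in its own register; the j atoms receive
 * the negated sum over the four i atoms, which gmx_sum4_pr reduces in a
 * single step per dimension. Hypothetical scalar names.
 */
#if 0
static void decrement_j_forces(double *fj, int n,
                               const double t0[], const double t1[],
                               const double t2[], const double t3[])
{
    int k;

    for (k = 0; k < n; k++)
    {
        /* f_j -= sum over the UNROLLI=4 i atoms of f_ij */
        fj[k] -= t0[k] + t1[k] + t2[k] + t3[k];
    }
}
#endif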