Update copyright statements and change license to LGPL
src/gmxlib/nonbonded/nb_kernel_avx_128_fma_single/nb_kernel_ElecGB_VdwNone_GeomP1P1_avx_128_fma_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012, by the GROMACS development team, led by
5  * David van der Spoel, Berk Hess, Erik Lindahl, and including many
6  * others, as listed in the AUTHORS file in the top-level source
7  * directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_128_fma_single kernel generator.
37  */
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gmx_math_x86_avx_128_fma_single.h"
50 #include "kernelutil_x86_avx_128_fma_single.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_128_fma_single
54  * Electrostatics interaction: GeneralizedBorn
55  * VdW interaction:            None
56  * Geometry:                   Particle-Particle
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_128_fma_single
61                     (t_nblist * gmx_restrict                nlist,
62                      rvec * gmx_restrict                    xx,
63                      rvec * gmx_restrict                    ff,
64                      t_forcerec * gmx_restrict              fr,
65                      t_mdatoms * gmx_restrict               mdatoms,
66                      nb_kernel_data_t * gmx_restrict        kernel_data,
67                      t_nrnb * gmx_restrict                  nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j-loop unrolling done with AVX_128, i.e. the four different
72      * jnr indices whose data occupy the four positions of the SIMD register.
73      */
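    /* With four-way j unrolling each __m128 register holds one component for four
     * j atoms at a time, e.g. jx0 = { x[jnrA], x[jnrB], x[jnrC], x[jnrD] }, while
     * the i-atom data are broadcast to all four lanes.
     */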
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
80     real             rcutoff_scalar;
81     real             *shiftvec,*fshift,*x,*f;
82     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
83     real             scratch[4*DIM];
84     __m128           fscal,rcutoff,rcutoff2,jidxall;
85     int              vdwioffset0;
86     __m128           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
87     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
88     __m128           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
89     __m128           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
90     __m128           velec,felec,velecsum,facel,crf,krf,krf2;
91     real             *charge;
92     __m128i          gbitab;
93     __m128           vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,twogbeps,dvdatmp;
94     __m128           minushalf = _mm_set1_ps(-0.5);
95     real             *invsqrta,*dvda,*gbtab;
96     __m128i          vfitab;
97     __m128i          ifour       = _mm_set1_epi32(4);
98     __m128           rt,vfeps,twovfeps,vftabscale,Y,F,G,H,Fp,VV,FF;
99     real             *vftab;
100     __m128           dummy_mask,cutoff_mask;
101     __m128           signbit = _mm_castsi128_ps( _mm_set1_epi32(0x80000000) );
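    /* signbit has only the sign bit set in each lane; xor-ing a packed float with
     * it flips the sign, which is how gbqqfactor is negated further down.
     */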
102     __m128           one     = _mm_set1_ps(1.0);
103     __m128           two     = _mm_set1_ps(2.0);
104     x                = xx[0];
105     f                = ff[0];
106
107     nri              = nlist->nri;
108     iinr             = nlist->iinr;
109     jindex           = nlist->jindex;
110     jjnr             = nlist->jjnr;
111     shiftidx         = nlist->shift;
112     gid              = nlist->gid;
113     shiftvec         = fr->shift_vec[0];
114     fshift           = fr->fshift[0];
115     facel            = _mm_set1_ps(fr->epsfac);
116     charge           = mdatoms->chargeA;
117
118     invsqrta         = fr->invsqrta;
119     dvda             = fr->dvda;
120     gbtabscale       = _mm_set1_ps(fr->gbtab.scale);
121     gbtab            = fr->gbtab.data;
122     gbinvepsdiff     = _mm_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
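    /* facel broadcasts the Coulomb prefactor fr->epsfac to all lanes, and
     * gbinvepsdiff = 1/eps_r - 1/eps_solvent is the dielectric factor that enters
     * the Generalized Born pair energy in the inner loops below.
     */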
123
124     /* Avoid stupid compiler warnings */
125     jnrA = jnrB = jnrC = jnrD = 0;
126     j_coord_offsetA = 0;
127     j_coord_offsetB = 0;
128     j_coord_offsetC = 0;
129     j_coord_offsetD = 0;
130
131     outeriter        = 0;
132     inneriter        = 0;
133
134     for(iidx=0;iidx<4*DIM;iidx++)
135     {
136         scratch[iidx] = 0.0;
137     }
138
139     /* Start outer loop over neighborlists */
140     for(iidx=0; iidx<nri; iidx++)
141     {
142         /* Load shift vector for this list */
143         i_shift_offset   = DIM*shiftidx[iidx];
144
145         /* Load limits for loop over neighbors */
146         j_index_start    = jindex[iidx];
147         j_index_end      = jindex[iidx+1];
148
149         /* Get outer coordinate index */
150         inr              = iinr[iidx];
151         i_coord_offset   = DIM*inr;
152
153         /* Load i particle coords and add shift vector */
154         gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
155
156         fix0             = _mm_setzero_ps();
157         fiy0             = _mm_setzero_ps();
158         fiz0             = _mm_setzero_ps();
159
160         /* Load parameters for i particles */
161         iq0              = _mm_mul_ps(facel,_mm_load1_ps(charge+inr+0));
162         isai0            = _mm_load1_ps(invsqrta+inr+0);
163
164         /* Reset potential sums */
165         velecsum         = _mm_setzero_ps();
166         vgbsum           = _mm_setzero_ps();
167         dvdasum          = _mm_setzero_ps();
168
169         /* Start inner kernel loop */
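        /* Complete quads of four j atoms are handled in this loop; jjnr is padded
         * with negative indices, so the loop stops at the first incomplete quad,
         * which is then processed by the masked epilogue that follows.
         */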
170         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
171         {
172
173             /* Get j neighbor index, and coordinate index */
174             jnrA             = jjnr[jidx];
175             jnrB             = jjnr[jidx+1];
176             jnrC             = jjnr[jidx+2];
177             jnrD             = jjnr[jidx+3];
178             j_coord_offsetA  = DIM*jnrA;
179             j_coord_offsetB  = DIM*jnrB;
180             j_coord_offsetC  = DIM*jnrC;
181             j_coord_offsetD  = DIM*jnrD;
182
183             /* load j atom coordinates */
184             gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
185                                               x+j_coord_offsetC,x+j_coord_offsetD,
186                                               &jx0,&jy0,&jz0);
187
188             /* Calculate displacement vector */
189             dx00             = _mm_sub_ps(ix0,jx0);
190             dy00             = _mm_sub_ps(iy0,jy0);
191             dz00             = _mm_sub_ps(iz0,jz0);
192
193             /* Calculate squared distance and things based on it */
194             rsq00            = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
195
196             rinv00           = gmx_mm_invsqrt_ps(rsq00);
197
198             /* Load parameters for j particles */
199             jq0              = gmx_mm_load_4real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
200                                                               charge+jnrC+0,charge+jnrD+0);
201             isaj0            = gmx_mm_load_4real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
202                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
203
204             /**************************
205              * CALCULATE INTERACTIONS *
206              **************************/
207
208             r00              = _mm_mul_ps(rsq00,rinv00);
209
210             /* Compute parameters for interactions between i and j atoms */
211             qq00             = _mm_mul_ps(iq0,jq0);
212
213             /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
214             isaprod          = _mm_mul_ps(isai0,isaj0);
215             gbqqfactor       = _mm_xor_ps(signbit,_mm_mul_ps(qq00,_mm_mul_ps(isaprod,gbinvepsdiff)));
216             gbscale          = _mm_mul_ps(isaprod,gbtabscale);
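            /* gbqqfactor = -qi*qj*isai*isaj*(1/eps_r - 1/eps_solvent) multiplies the
             * tabulated value to give the GB pair energy, while gbscale = isai*isaj*gbtabscale
             * converts r into table units for this pair.
             */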
217
218             /* Calculate generalized born table index - this is a separate table from the normal one,
219              * but we use the same procedure by multiplying r with scale and truncating to integer.
220              */
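            /* Each table point stores four floats (Y,F,G,H). Per SIMD lane the lookup
             * below is equivalent to this scalar sketch:
             *
             *     int   n   = (int)(r*gbscale);
             *     float eps = r*gbscale - n;
             *     const float *t = gbtab + 4*n;                /+ Y, F, G, H entries
             *     float Fp  = t[1] + eps*(t[2] + eps*t[3]);
             *     float VV  = t[0] + eps*Fp;                   /+ potential
             *     float FF  = Fp + eps*(t[2] + 2.0f*eps*t[3]); /+ derivative
             *     vgb = gbqqfactor*VV;
             *     fgb = gbqqfactor*FF*gbscale;
             */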
221             rt               = _mm_mul_ps(r00,gbscale);
222             gbitab           = _mm_cvttps_epi32(rt);
223 #ifdef __XOP__
224             gbeps            = _mm_frcz_ps(rt);
225 #else
226             gbeps            = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
227 #endif
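            /* gbeps is the fractional part of rt: AMD XOP hardware extracts it
             * directly with _mm_frcz_ps, otherwise it is computed as rt - floor(rt).
             */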
228             gbitab           = _mm_slli_epi32(gbitab,2);
229
230             Y                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,0) );
231             F                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,1) );
232             G                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,2) );
233             H                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,3) );
234             _MM_TRANSPOSE4_PS(Y,F,G,H);
235             Fp               = _mm_macc_ps(gbeps,_mm_macc_ps(gbeps,H,G),F);
236             VV               = _mm_macc_ps(gbeps,Fp,Y);
237             vgb              = _mm_mul_ps(gbqqfactor,VV);
238
239             twogbeps         = _mm_add_ps(gbeps,gbeps);
240             FF               = _mm_macc_ps(_mm_macc_ps(twogbeps,H,G),gbeps,Fp);
241             fgb              = _mm_mul_ps(gbqqfactor,_mm_mul_ps(FF,gbscale));
242             dvdatmp          = _mm_mul_ps(minushalf,_mm_macc_ps(fgb,r00,vgb));
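            /* dvdatmp = -0.5*(vgb + fgb*r00) is collected in dvdasum for the i atom
             * (scaled by isai^2 after the j loop) and added to dvda[] for each j atom
             * below, scaled by isaj^2.
             */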
243             dvdasum          = _mm_add_ps(dvdasum,dvdatmp);
244             fjptrA           = dvda+jnrA;
245             fjptrB           = dvda+jnrB;
246             fjptrC           = dvda+jnrC;
247             fjptrD           = dvda+jnrD;
248             gmx_mm_increment_4real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,_mm_mul_ps(dvdatmp,_mm_mul_ps(isaj0,isaj0)));
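            /* Coulomb part: velec = qq/r, and felec = (velec/r - fgb)/r is the scalar
             * force that multiplies the displacement vector components below.
             */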
249             velec            = _mm_mul_ps(qq00,rinv00);
250             felec            = _mm_mul_ps(_mm_msub_ps(velec,rinv00,fgb),rinv00);
251
252             /* Update potential sum for this i atom from the interaction with this j atom. */
253             velecsum         = _mm_add_ps(velecsum,velec);
254             vgbsum           = _mm_add_ps(vgbsum,vgb);
255
256             fscal            = felec;
257
258              /* Update vectorial force */
259             fix0             = _mm_macc_ps(dx00,fscal,fix0);
260             fiy0             = _mm_macc_ps(dy00,fscal,fiy0);
261             fiz0             = _mm_macc_ps(dz00,fscal,fiz0);
262
263             fjptrA             = f+j_coord_offsetA;
264             fjptrB             = f+j_coord_offsetB;
265             fjptrC             = f+j_coord_offsetC;
266             fjptrD             = f+j_coord_offsetD;
267             gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,
268                                                    _mm_mul_ps(dx00,fscal),
269                                                    _mm_mul_ps(dy00,fscal),
270                                                    _mm_mul_ps(dz00,fscal));
271
272             /* Inner loop uses 61 flops */
273         }
274
275         if(jidx<j_index_end)
276         {
277
278             /* Get j neighbor index, and coordinate index */
279             jnrlistA         = jjnr[jidx];
280             jnrlistB         = jjnr[jidx+1];
281             jnrlistC         = jjnr[jidx+2];
282             jnrlistD         = jjnr[jidx+3];
283             /* Each jjnr element is negative for non-real (padding) atoms.
284              * The mask below is therefore 0xFFFFFFFF for dummy entries and 0x0 for real ones,
285              * so val = _mm_andnot_ps(mask,val) clears the dummy contributions.
286              */
287             dummy_mask = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
288             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
289             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
290             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
291             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
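            /* Dummy indices are clamped to 0 so the gathers below stay within bounds;
             * their contributions are zeroed with dummy_mask and their force/dvda
             * updates are redirected to the scratch buffer.
             */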
292             j_coord_offsetA  = DIM*jnrA;
293             j_coord_offsetB  = DIM*jnrB;
294             j_coord_offsetC  = DIM*jnrC;
295             j_coord_offsetD  = DIM*jnrD;
296
297             /* load j atom coordinates */
298             gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
299                                               x+j_coord_offsetC,x+j_coord_offsetD,
300                                               &jx0,&jy0,&jz0);
301
302             /* Calculate displacement vector */
303             dx00             = _mm_sub_ps(ix0,jx0);
304             dy00             = _mm_sub_ps(iy0,jy0);
305             dz00             = _mm_sub_ps(iz0,jz0);
306
307             /* Calculate squared distance and things based on it */
308             rsq00            = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
309
310             rinv00           = gmx_mm_invsqrt_ps(rsq00);
311
312             /* Load parameters for j particles */
313             jq0              = gmx_mm_load_4real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
314                                                               charge+jnrC+0,charge+jnrD+0);
315             isaj0            = gmx_mm_load_4real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
316                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
317
318             /**************************
319              * CALCULATE INTERACTIONS *
320              **************************/
321
322             r00              = _mm_mul_ps(rsq00,rinv00);
323             r00              = _mm_andnot_ps(dummy_mask,r00);
324
325             /* Compute parameters for interactions between i and j atoms */
326             qq00             = _mm_mul_ps(iq0,jq0);
327
328             /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
329             isaprod          = _mm_mul_ps(isai0,isaj0);
330             gbqqfactor       = _mm_xor_ps(signbit,_mm_mul_ps(qq00,_mm_mul_ps(isaprod,gbinvepsdiff)));
331             gbscale          = _mm_mul_ps(isaprod,gbtabscale);
332
333             /* Calculate generalized born table index - this is a separate table from the normal one,
334              * but we use the same procedure by multiplying r with scale and truncating to integer.
335              */
336             rt               = _mm_mul_ps(r00,gbscale);
337             gbitab           = _mm_cvttps_epi32(rt);
338 #ifdef __XOP__
339             gbeps            = _mm_frcz_ps(rt);
340 #else
341             gbeps            = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
342 #endif
343             gbitab           = _mm_slli_epi32(gbitab,2);
344
345             Y                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,0) );
346             F                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,1) );
347             G                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,2) );
348             H                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,3) );
349             _MM_TRANSPOSE4_PS(Y,F,G,H);
350             Fp               = _mm_macc_ps(gbeps,_mm_macc_ps(gbeps,H,G),F);
351             VV               = _mm_macc_ps(gbeps,Fp,Y);
352             vgb              = _mm_mul_ps(gbqqfactor,VV);
353
354             twogbeps         = _mm_add_ps(gbeps,gbeps);
355             FF               = _mm_macc_ps(_mm_macc_ps(twogbeps,H,G),gbeps,Fp);
356             fgb              = _mm_mul_ps(gbqqfactor,_mm_mul_ps(FF,gbscale));
357             dvdatmp          = _mm_mul_ps(minushalf,_mm_macc_ps(fgb,r00,vgb));
358             dvdasum          = _mm_add_ps(dvdasum,dvdatmp);
359             /* Pointers for dummy entries are redirected to the scratch buffer, so that nothing can be corrupted even by compilers that take gmx_restrict seriously (e.g. icc 13). */
360             fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
361             fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
362             fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
363             fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
364             gmx_mm_increment_4real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,_mm_mul_ps(dvdatmp,_mm_mul_ps(isaj0,isaj0)));
365             velec            = _mm_mul_ps(qq00,rinv00);
366             felec            = _mm_mul_ps(_mm_msub_ps(velec,rinv00,fgb),rinv00);
367
368             /* Update potential sum for this i atom from the interaction with this j atom. */
369             velec            = _mm_andnot_ps(dummy_mask,velec);
370             velecsum         = _mm_add_ps(velecsum,velec);
371             vgb              = _mm_andnot_ps(dummy_mask,vgb);
372             vgbsum           = _mm_add_ps(vgbsum,vgb);
373
374             fscal            = felec;
375
376             fscal            = _mm_andnot_ps(dummy_mask,fscal);
377
378              /* Update vectorial force */
379             fix0             = _mm_macc_ps(dx00,fscal,fix0);
380             fiy0             = _mm_macc_ps(dy00,fscal,fiy0);
381             fiz0             = _mm_macc_ps(dz00,fscal,fiz0);
382
383             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
384             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
385             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
386             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
387             gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,
388                                                    _mm_mul_ps(dx00,fscal),
389                                                    _mm_mul_ps(dy00,fscal),
390                                                    _mm_mul_ps(dz00,fscal));
391
392             /* Inner loop uses 62 flops */
393         }
394
395         /* End of innermost loop */
396
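        /* Reduce the four lanes of fix0/fiy0/fiz0 into f[] for the i atom and into
         * fshift[] for this shift vector (used for the virial).
         */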
397         gmx_mm_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
398                                               f+i_coord_offset,fshift+i_shift_offset);
399
400         ggid                        = gid[iidx];
401         /* Update potential energies */
402         gmx_mm_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
403         gmx_mm_update_1pot_ps(vgbsum,kernel_data->energygrp_polarization+ggid);
404         dvdasum = _mm_mul_ps(dvdasum, _mm_mul_ps(isai0,isai0));
405         gmx_mm_update_1pot_ps(dvdasum,dvda+inr);
406
407         /* Increment number of inner iterations */
408         inneriter                  += j_index_end - j_index_start;
409
410         /* Outer loop uses 9 flops */
411     }
412
413     /* Increment number of outer iterations */
414     outeriter        += nri;
415
416     /* Update outer/inner flops */
417
418     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*9 + inneriter*62);
419 }
420 /*
421  * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_128_fma_single
422  * Electrostatics interaction: GeneralizedBorn
423  * VdW interaction:            None
424  * Geometry:                   Particle-Particle
425  * Calculate force/pot:        Force
426  */
427 void
428 nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_128_fma_single
429                     (t_nblist * gmx_restrict                nlist,
430                      rvec * gmx_restrict                    xx,
431                      rvec * gmx_restrict                    ff,
432                      t_forcerec * gmx_restrict              fr,
433                      t_mdatoms * gmx_restrict               mdatoms,
434                      nb_kernel_data_t * gmx_restrict        kernel_data,
435                      t_nrnb * gmx_restrict                  nrnb)
436 {
437     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
438      * just 0 for non-waters.
439      * Suffixes A,B,C,D refer to j-loop unrolling done with AVX_128, i.e. the four different
440      * jnr indices whose data occupy the four positions of the SIMD register.
441      */
442     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
443     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
444     int              jnrA,jnrB,jnrC,jnrD;
445     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
446     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
447     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
448     real             rcutoff_scalar;
449     real             *shiftvec,*fshift,*x,*f;
450     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
451     real             scratch[4*DIM];
452     __m128           fscal,rcutoff,rcutoff2,jidxall;
453     int              vdwioffset0;
454     __m128           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
455     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
456     __m128           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
457     __m128           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
458     __m128           velec,felec,velecsum,facel,crf,krf,krf2;
459     real             *charge;
460     __m128i          gbitab;
461     __m128           vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,twogbeps,dvdatmp;
462     __m128           minushalf = _mm_set1_ps(-0.5);
463     real             *invsqrta,*dvda,*gbtab;
464     __m128i          vfitab;
465     __m128i          ifour       = _mm_set1_epi32(4);
466     __m128           rt,vfeps,twovfeps,vftabscale,Y,F,G,H,Fp,VV,FF;
467     real             *vftab;
468     __m128           dummy_mask,cutoff_mask;
469     __m128           signbit = _mm_castsi128_ps( _mm_set1_epi32(0x80000000) );
470     __m128           one     = _mm_set1_ps(1.0);
471     __m128           two     = _mm_set1_ps(2.0);
472     x                = xx[0];
473     f                = ff[0];
474
475     nri              = nlist->nri;
476     iinr             = nlist->iinr;
477     jindex           = nlist->jindex;
478     jjnr             = nlist->jjnr;
479     shiftidx         = nlist->shift;
480     gid              = nlist->gid;
481     shiftvec         = fr->shift_vec[0];
482     fshift           = fr->fshift[0];
483     facel            = _mm_set1_ps(fr->epsfac);
484     charge           = mdatoms->chargeA;
485
486     invsqrta         = fr->invsqrta;
487     dvda             = fr->dvda;
488     gbtabscale       = _mm_set1_ps(fr->gbtab.scale);
489     gbtab            = fr->gbtab.data;
490     gbinvepsdiff     = _mm_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
491
492     /* Avoid stupid compiler warnings */
493     jnrA = jnrB = jnrC = jnrD = 0;
494     j_coord_offsetA = 0;
495     j_coord_offsetB = 0;
496     j_coord_offsetC = 0;
497     j_coord_offsetD = 0;
498
499     outeriter        = 0;
500     inneriter        = 0;
501
502     for(iidx=0;iidx<4*DIM;iidx++)
503     {
504         scratch[iidx] = 0.0;
505     }
506
507     /* Start outer loop over neighborlists */
508     for(iidx=0; iidx<nri; iidx++)
509     {
510         /* Load shift vector for this list */
511         i_shift_offset   = DIM*shiftidx[iidx];
512
513         /* Load limits for loop over neighbors */
514         j_index_start    = jindex[iidx];
515         j_index_end      = jindex[iidx+1];
516
517         /* Get outer coordinate index */
518         inr              = iinr[iidx];
519         i_coord_offset   = DIM*inr;
520
521         /* Load i particle coords and add shift vector */
522         gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
523
524         fix0             = _mm_setzero_ps();
525         fiy0             = _mm_setzero_ps();
526         fiz0             = _mm_setzero_ps();
527
528         /* Load parameters for i particles */
529         iq0              = _mm_mul_ps(facel,_mm_load1_ps(charge+inr+0));
530         isai0            = _mm_load1_ps(invsqrta+inr+0);
531
532         dvdasum          = _mm_setzero_ps();
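        /* Force-only kernel: the electrostatic and GB potential accumulators of the
         * VF kernel are omitted, but dvdasum is still needed because dvda[] is
         * updated exactly as in the VF kernel.
         */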
533
534         /* Start inner kernel loop */
535         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
536         {
537
538             /* Get j neighbor index, and coordinate index */
539             jnrA             = jjnr[jidx];
540             jnrB             = jjnr[jidx+1];
541             jnrC             = jjnr[jidx+2];
542             jnrD             = jjnr[jidx+3];
543             j_coord_offsetA  = DIM*jnrA;
544             j_coord_offsetB  = DIM*jnrB;
545             j_coord_offsetC  = DIM*jnrC;
546             j_coord_offsetD  = DIM*jnrD;
547
548             /* load j atom coordinates */
549             gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
550                                               x+j_coord_offsetC,x+j_coord_offsetD,
551                                               &jx0,&jy0,&jz0);
552
553             /* Calculate displacement vector */
554             dx00             = _mm_sub_ps(ix0,jx0);
555             dy00             = _mm_sub_ps(iy0,jy0);
556             dz00             = _mm_sub_ps(iz0,jz0);
557
558             /* Calculate squared distance and things based on it */
559             rsq00            = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
560
561             rinv00           = gmx_mm_invsqrt_ps(rsq00);
562
563             /* Load parameters for j particles */
564             jq0              = gmx_mm_load_4real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
565                                                               charge+jnrC+0,charge+jnrD+0);
566             isaj0            = gmx_mm_load_4real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
567                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
568
569             /**************************
570              * CALCULATE INTERACTIONS *
571              **************************/
572
573             r00              = _mm_mul_ps(rsq00,rinv00);
574
575             /* Compute parameters for interactions between i and j atoms */
576             qq00             = _mm_mul_ps(iq0,jq0);
577
578             /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
579             isaprod          = _mm_mul_ps(isai0,isaj0);
580             gbqqfactor       = _mm_xor_ps(signbit,_mm_mul_ps(qq00,_mm_mul_ps(isaprod,gbinvepsdiff)));
581             gbscale          = _mm_mul_ps(isaprod,gbtabscale);
582
583             /* Calculate generalized born table index - this is a separate table from the normal one,
584              * but we use the same procedure by multiplying r with scale and truncating to integer.
585              */
586             rt               = _mm_mul_ps(r00,gbscale);
587             gbitab           = _mm_cvttps_epi32(rt);
588 #ifdef __XOP__
589             gbeps            = _mm_frcz_ps(rt);
590 #else
591             gbeps            = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
592 #endif
593             gbitab           = _mm_slli_epi32(gbitab,2);
594
595             Y                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,0) );
596             F                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,1) );
597             G                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,2) );
598             H                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,3) );
599             _MM_TRANSPOSE4_PS(Y,F,G,H);
600             Fp               = _mm_macc_ps(gbeps,_mm_macc_ps(gbeps,H,G),F);
601             VV               = _mm_macc_ps(gbeps,Fp,Y);
602             vgb              = _mm_mul_ps(gbqqfactor,VV);
603
604             twogbeps         = _mm_add_ps(gbeps,gbeps);
605             FF               = _mm_macc_ps(_mm_macc_ps(twogbeps,H,G),gbeps,Fp);
606             fgb              = _mm_mul_ps(gbqqfactor,_mm_mul_ps(FF,gbscale));
607             dvdatmp          = _mm_mul_ps(minushalf,_mm_macc_ps(fgb,r00,vgb));
608             dvdasum          = _mm_add_ps(dvdasum,dvdatmp);
609             fjptrA           = dvda+jnrA;
610             fjptrB           = dvda+jnrB;
611             fjptrC           = dvda+jnrC;
612             fjptrD           = dvda+jnrD;
613             gmx_mm_increment_4real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,_mm_mul_ps(dvdatmp,_mm_mul_ps(isaj0,isaj0)));
614             velec            = _mm_mul_ps(qq00,rinv00);
615             felec            = _mm_mul_ps(_mm_msub_ps(velec,rinv00,fgb),rinv00);
616
617             fscal            = felec;
618
619              /* Update vectorial force */
620             fix0             = _mm_macc_ps(dx00,fscal,fix0);
621             fiy0             = _mm_macc_ps(dy00,fscal,fiy0);
622             fiz0             = _mm_macc_ps(dz00,fscal,fiz0);
623
624             fjptrA             = f+j_coord_offsetA;
625             fjptrB             = f+j_coord_offsetB;
626             fjptrC             = f+j_coord_offsetC;
627             fjptrD             = f+j_coord_offsetD;
628             gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,
629                                                    _mm_mul_ps(dx00,fscal),
630                                                    _mm_mul_ps(dy00,fscal),
631                                                    _mm_mul_ps(dz00,fscal));
632
633             /* Inner loop uses 59 flops */
634         }
635
636         if(jidx<j_index_end)
637         {
638
639             /* Get j neighbor index, and coordinate index */
640             jnrlistA         = jjnr[jidx];
641             jnrlistB         = jjnr[jidx+1];
642             jnrlistC         = jjnr[jidx+2];
643             jnrlistD         = jjnr[jidx+3];
644             /* Each jjnr element is negative for non-real (padding) atoms.
645              * The mask below is therefore 0xFFFFFFFF for dummy entries and 0x0 for real ones,
646              * so val = _mm_andnot_ps(mask,val) clears the dummy contributions.
647              */
648             dummy_mask = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
649             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
650             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
651             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
652             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
653             j_coord_offsetA  = DIM*jnrA;
654             j_coord_offsetB  = DIM*jnrB;
655             j_coord_offsetC  = DIM*jnrC;
656             j_coord_offsetD  = DIM*jnrD;
657
658             /* load j atom coordinates */
659             gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
660                                               x+j_coord_offsetC,x+j_coord_offsetD,
661                                               &jx0,&jy0,&jz0);
662
663             /* Calculate displacement vector */
664             dx00             = _mm_sub_ps(ix0,jx0);
665             dy00             = _mm_sub_ps(iy0,jy0);
666             dz00             = _mm_sub_ps(iz0,jz0);
667
668             /* Calculate squared distance and things based on it */
669             rsq00            = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
670
671             rinv00           = gmx_mm_invsqrt_ps(rsq00);
672
673             /* Load parameters for j particles */
674             jq0              = gmx_mm_load_4real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
675                                                               charge+jnrC+0,charge+jnrD+0);
676             isaj0            = gmx_mm_load_4real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
677                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
678
679             /**************************
680              * CALCULATE INTERACTIONS *
681              **************************/
682
683             r00              = _mm_mul_ps(rsq00,rinv00);
684             r00              = _mm_andnot_ps(dummy_mask,r00);
685
686             /* Compute parameters for interactions between i and j atoms */
687             qq00             = _mm_mul_ps(iq0,jq0);
688
689             /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
690             isaprod          = _mm_mul_ps(isai0,isaj0);
691             gbqqfactor       = _mm_xor_ps(signbit,_mm_mul_ps(qq00,_mm_mul_ps(isaprod,gbinvepsdiff)));
692             gbscale          = _mm_mul_ps(isaprod,gbtabscale);
693
694             /* Calculate generalized born table index - this is a separate table from the normal one,
695              * but we use the same procedure by multiplying r with scale and truncating to integer.
696              */
697             rt               = _mm_mul_ps(r00,gbscale);
698             gbitab           = _mm_cvttps_epi32(rt);
699 #ifdef __XOP__
700             gbeps            = _mm_frcz_ps(rt);
701 #else
702             gbeps            = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
703 #endif
704             gbitab           = _mm_slli_epi32(gbitab,2);
705
706             Y                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,0) );
707             F                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,1) );
708             G                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,2) );
709             H                = _mm_load_ps( gbtab + _mm_extract_epi32(gbitab,3) );
710             _MM_TRANSPOSE4_PS(Y,F,G,H);
711             Fp               = _mm_macc_ps(gbeps,_mm_macc_ps(gbeps,H,G),F);
712             VV               = _mm_macc_ps(gbeps,Fp,Y);
713             vgb              = _mm_mul_ps(gbqqfactor,VV);
714
715             twogbeps         = _mm_add_ps(gbeps,gbeps);
716             FF               = _mm_macc_ps(_mm_macc_ps(twogbeps,H,G),gbeps,Fp);
717             fgb              = _mm_mul_ps(gbqqfactor,_mm_mul_ps(FF,gbscale));
718             dvdatmp          = _mm_mul_ps(minushalf,_mm_macc_ps(fgb,r00,vgb));
719             dvdasum          = _mm_add_ps(dvdasum,dvdatmp);
720             /* Pointers for dummy entries are redirected to the scratch buffer, so that nothing can be corrupted even by compilers that take gmx_restrict seriously (e.g. icc 13). */
721             fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
722             fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
723             fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
724             fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
725             gmx_mm_increment_4real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,_mm_mul_ps(dvdatmp,_mm_mul_ps(isaj0,isaj0)));
726             velec            = _mm_mul_ps(qq00,rinv00);
727             felec            = _mm_mul_ps(_mm_msub_ps(velec,rinv00,fgb),rinv00);
728
729             fscal            = felec;
730
731             fscal            = _mm_andnot_ps(dummy_mask,fscal);
732
733              /* Update vectorial force */
734             fix0             = _mm_macc_ps(dx00,fscal,fix0);
735             fiy0             = _mm_macc_ps(dy00,fscal,fiy0);
736             fiz0             = _mm_macc_ps(dz00,fscal,fiz0);
737
738             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
739             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
740             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
741             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
742             gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,
743                                                    _mm_mul_ps(dx00,fscal),
744                                                    _mm_mul_ps(dy00,fscal),
745                                                    _mm_mul_ps(dz00,fscal));
746
747             /* Inner loop uses 60 flops */
748         }
749
750         /* End of innermost loop */
751
752         gmx_mm_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
753                                               f+i_coord_offset,fshift+i_shift_offset);
754
755         dvdasum = _mm_mul_ps(dvdasum, _mm_mul_ps(isai0,isai0));
756         gmx_mm_update_1pot_ps(dvdasum,dvda+inr);
757
758         /* Increment number of inner iterations */
759         inneriter                  += j_index_end - j_index_start;
760
761         /* Outer loop uses 7 flops */
762     }
763
764     /* Increment number of outer iterations */
765     outeriter        += nri;
766
767     /* Update outer/inner flops */
768
769     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*60);
770 }