PME GPU/CUDA data framework.
[alexxy/gromacs.git] / src / gromacs / ewald / pme-only.cpp
index 66a7728b24b6d5f73b342cb249b7f0d88057347e..75eb9fe2a04c42b77108e84ea49ea5b66f73d004 100644 (file)
@@ -122,8 +122,13 @@ static void gmx_pmeonly_switch(int *npmedata, struct gmx_pme_t ***pmedata,
             pme->nky == grid_size[YY] &&
             pme->nkz == grid_size[ZZ])
         {
+            /* Here we have found an existing PME data structure that suits us.
+             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
+             * This should not cause actual GPU reallocations, since the allocated buffers are never shrunk.
+             * Only the grid size parameters of the GPU kernels need to be updated.
+             */
+            gmx_pme_reinit(&((*pmedata)[ind]), cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
             *pme_ret = pme;
-
             return;
         }
 
@@ -183,6 +188,7 @@ int gmx_pmeonly(struct gmx_pme_t *pme,
         do
         {
             /* Domain decomposition */
+            bool atomSetChanged = false;
             ret = gmx_pme_recv_coeffs_coords(pme_pp,
                                              &natoms,
                                              &chargeA, &chargeB,
@@ -193,7 +199,10 @@ int gmx_pmeonly(struct gmx_pme_t *pme,
                                              &lambda_q, &lambda_lj,
                                              &bEnerVir,
                                              &step,
-                                             grid_switch, &ewaldcoeff_q, &ewaldcoeff_lj);
+                                             grid_switch,
+                                             &ewaldcoeff_q,
+                                             &ewaldcoeff_lj,
+                                             &atomSetChanged);
 
             if (ret == pmerecvqxSWITCHGRID)
             {
@@ -201,6 +210,11 @@ int gmx_pmeonly(struct gmx_pme_t *pme,
                 gmx_pmeonly_switch(&npmedata, &pmedata, grid_switch, ewaldcoeff_q, ewaldcoeff_lj, cr, ir, &pme);
             }
 
+            if (atomSetChanged)
+            {
+                gmx_pme_reinit_atoms(pme, natoms, chargeA);
+            }
+
             if (ret == pmerecvqxRESETCOUNTERS)
             {
                 /* Reset the cycle and flop counters */