added Dirichlet and Neumann BC at the dynamic top wall - sphere - GPU-based 3D discrete element method algorithm with optional fluid coupling
git clone git://src.adamsgaard.dk/sphere
---
commit 4118d84337bcf90f6d7914596e600862afc41c7e
parent 712284f13744c8f0ffdfd0102a2f081b90c3b423
Author: Anders Damsgaard <anders.damsgaard@geo.au.dk>
Date: Fri, 19 Sep 2014 14:55:05 +0200
added Dirichlet and Neumann BC at the dynamic top wall
Diffstat:
M python/shear-starter.py | 4 ++--
M src/device.cu | 78 ++++++-------------------------
M src/navierstokes.cuh | 7 ++++++-
3 files changed, 22 insertions(+), 67 deletions(-)
---
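The commit lets the fluid solver treat the moving top wall with either a
Dirichlet condition (prescribe the pressure value in the wall cell) or a
Neumann condition (prescribe the gradient, here the hydrostatic gradient
dp_dz). A minimal, self-contained sketch contrasting the two on a 1D
pressure column follows; it is not sphere code, and all names in it are
illustrative:

    #include <cstdio>

    typedef double Float;

    // Dirichlet: write the prescribed pressure value into the wall cell.
    __global__ void setDirichletTop(Float* p, const unsigned int wall_iz,
            const Float p_wall)
    {
        const unsigned int z = blockDim.x*blockIdx.x + threadIdx.x;
        if (z == wall_iz)
            p[z] = p_wall;
    }

    // Neumann: prescribe the gradient instead; the wall cell is
    // extrapolated from its lower neighbor by the hydrostatic
    // increment dp_dz.
    __global__ void setNeumannTop(Float* p, const unsigned int wall_iz,
            const Float dp_dz)
    {
        const unsigned int z = blockDim.x*blockIdx.x + threadIdx.x;
        if (z == wall_iz && z > 0)
            p[z] = p[z-1] - dp_dz;
    }

    int main()
    {
        const unsigned int nz = 8, wall_iz = 5;
        const Float dp_dz = 9.81e3; // rho_f = 1000 kg/m^3, |g_z| = 9.81, dz = 1 m
        Float p[nz];
        for (unsigned int z = 0; z < nz; ++z)  // hydrostatic initial column
            p[z] = 600.0e3 - z*dp_dz;

        Float* dev_p;
        cudaMalloc(&dev_p, nz*sizeof(Float));
        cudaMemcpy(dev_p, p, nz*sizeof(Float), cudaMemcpyHostToDevice);
        setNeumannTop<<<1, nz>>>(dev_p, wall_iz, dp_dz); // or setDirichletTop
        cudaMemcpy(p, dev_p, nz*sizeof(Float), cudaMemcpyDeviceToHost);
        printf("p at wall cell %u: %g Pa\n", wall_iz, p[wall_iz]);
        cudaFree(dev_p);
        return 0;
    }

---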
diff --git a/python/shear-starter.py b/python/shear-starter.py
@@ -42,8 +42,8 @@ sim.zeroKinematics()
sim.shear(1.0/20.0)
if fluid:
- sim.num[2] *= 2
- sim.L[2] *= 2.0
+ #sim.num[2] *= 2
+ #sim.L[2] *= 2.0
sim.initFluid(mu = 1.787e-6, p = 600.0e3, hydrostatic = True)
#sim.initFluid(mu = 17.87e-4, p = 1.0e5, hydrostatic = True)
sim.setFluidBottomNoFlow()
diff --git a/src/device.cu b/src/device.cu
@@ -841,9 +841,10 @@ __host__ void DEM::startTime()
double t_ratio; // ratio between time flow in model vs. reality
// Index of dynamic top wall (if it exists)
- unsigned int wall0_iz;
+ unsigned int wall0_iz = 10000000;
// pressure difference due to fluid weight between two cells in the z direction
const Float dp_dz = fabs(params.rho_f*params.g[2]*grid.L[2]/grid.num[2]);
+ std::cout << "dp_dz = " << dp_dz << std::endl;
// Write a log file of the number of iterations it took before
// convergence in the fluid solver
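Note that grid.L[2]/grid.num[2] is the fluid cell height dz, so dp_dz above
is the hydrostatic pressure increment across one cell, |rho_f*g_z*dz|. A
quick check with illustrative numbers that are not from this commit:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double rho_f = 1000.0;  // fluid density [kg/m^3]
        const double g_z   = -9.81;   // gravity, z component [m/s^2]
        const double L_z   = 1.0;     // fluid domain height [m]
        const double num_z = 100.0;   // number of fluid cells in z
        // dz = L_z/num_z = 0.01 m, so dp_dz = |rho_f*g_z*dz| = 98.1 Pa/cell
        const double dp_dz = fabs(rho_f*g_z*L_z/num_z);
        printf("dp_dz = %g Pa\n", dp_dz);
        return 0;
    }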
@@ -1184,6 +1185,14 @@ __host__ void DEM::startTime()
dp_dz);
cudaThreadSynchronize();
checkForCudaErrorsIter("Post setNSepsilonAtTopWall", iter);
+#ifdef REPORT_EPSILON
+ std::cout
+ << "\n@@@@@@ TIME STEP " << iter << " @@@@@@"
+ << "\n###### EPSILON setNSepsilonAtTopWall "
+ << "######" << std::endl;
+ transferNSepsilonFromGlobalDeviceMemory();
+ printNSarray(stdout, ns.epsilon, "epsilon");
+#endif
}
// Modulate the pressures at the upper boundary cells
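The REPORT_EPSILON block added above is a compile-time diagnostic: when the
macro is defined at build time, the solver dumps the epsilon (pressure)
field right after the top-wall condition is applied. A minimal sketch of
the same toggle pattern, assuming a hypothetical demo file built with
nvcc -DREPORT_EPSILON:

    #include <cstdio>

    int main()
    {
        const double epsilon[4] = {1.0, 0.9, 0.8, 0.7};
    #ifdef REPORT_EPSILON
        // Only compiled in when building with -DREPORT_EPSILON
        for (int i = 0; i < 4; ++i)
            printf("epsilon[%d] = %f\n", i, epsilon[i]);
    #endif
        return 0;
    }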
@@ -1480,45 +1489,6 @@ __host__ void DEM::startTime()
printNSarray(stdout, ns.epsilon, "epsilon");
#endif
- /*smoothing<Float><<<dimGridFluid, dimBlockFluid>>>(
- dev_ns_epsilon,
- ns.bc_bot, ns.bc_top);
- cudaThreadSynchronize();
- checkForCudaErrorsIter("Post smoothing", iter);
-
-#ifdef REPORT_EPSILON
- std::cout << "\n###### JACOBI ITERATION "
- << nijac << " after smoothing(epsilon) ######"
- << std::endl;
- transferNSepsilonFromGlobalDeviceMemory();
- printNSarray(stdout, ns.epsilon, "epsilon");
-#endif
-
- setNSghostNodes<Float><<<dimGridFluid, dimBlockFluid>>>(
- dev_ns_epsilon,
- ns.bc_bot, ns.bc_top);
- cudaThreadSynchronize();
- checkForCudaErrorsIter("Post setNSghostNodesEpsilon(3)",
- iter);
- */
-
- /*if (report_epsilon == 1) {
- std::cout << "\n###### JACOBI ITERATION "
- << nijac
- << " after setNSghostNodesEpsilon(epsilon,3) ######"
- << std::endl;
- transferNSepsilonFromGlobalDeviceMemory();
- printNSarray(stdout, ns.epsilon, "epsilon");
- }*/
-
- // Store old values
- /*copyValues<Float><<<dimGridFluid, dimBlockFluid>>>(
- dev_ns_epsilon,
- dev_ns_epsilon_old);
- cudaThreadSynchronize();
- checkForCudaErrorsIter
- ("Post copyValues (epsilon->epsilon_old)", iter);*/
-
// Perform a single Jacobi iteration
if (PROFILING == 1)
startTimer(&kernel_tic);
@@ -1529,7 +1499,8 @@ __host__ void DEM::startTime()
dev_ns_f,
ns.bc_bot,
ns.bc_top,
- ns.theta);
+ ns.theta,
+ wall0_iz);
cudaThreadSynchronize();
if (PROFILING == 1)
stopTimer(&kernel_tic, &kernel_toc, &kernel_elapsed,
@@ -1549,11 +1520,6 @@ __host__ void DEM::startTime()
iter);
}
- // Flip flop: swap new and current array pointers
- /*Float* tmp = dev_ns_epsilon;
- dev_ns_epsilon = dev_ns_epsilon_new;
- dev_ns_epsilon_new = tmp;*/
-
// Copy new values to current values
copyValues<Float><<<dimGridFluid, dimBlockFluid>>>(
dev_ns_epsilon_new,
@@ -1562,15 +1528,6 @@ __host__ void DEM::startTime()
checkForCudaErrorsIter
("Post copyValues (epsilon_new->epsilon)", iter);
- /*findNormalizedResiduals<<<dimGridFluid, dimBlockFluid>>>(
- dev_ns_epsilon_old,
- dev_ns_epsilon,
- dev_ns_norm,
- ns.bc_bot, ns.bc_top);
- cudaThreadSynchronize();
- checkForCudaErrorsIter("Post findNormalizedResiduals",
- iter);*/
-
#ifdef REPORT_EPSILON
std::cout << "\n###### JACOBI ITERATION "
<< nijac << " after jacobiIterationNS ######"
@@ -1688,15 +1645,8 @@ __host__ void DEM::startTime()
stopTimer(&kernel_tic, &kernel_toc, &kernel_elapsed,
&t_updateNSvelocityPressure);
checkForCudaErrorsIter("Post updateNSvelocity", iter);
- }
-
- /*std::cout << "\n###### ITERATION "
- << iter << " ######" << std::endl;
- transferNSepsilonFromGlobalDeviceMemory();
- printNSarray(stdout, ns.epsilon, "epsilon");*/
- //transferNSepsilonNewFromGlobalDeviceMemory();
- //printNSarray(stdout, ns.epsilon_new, "epsilon_new");
- }
+ } // end iter % ns.dem == 0
+ } // end navierstokes == 1
if (np > 0) {
// Update particle kinematics
diff --git a/src/navierstokes.cuh b/src/navierstokes.cuh
@@ -474,6 +474,7 @@ __global__ void setNSepsilonAtTopWall(
// minus the contribution due to the fluid weight.
// p_iz+1 = p_iz - rho_f*g*dz
const Float p = value - dp_dz;
+ printf("%d,%d,%d\tp = %f\n", x,y,z, p);
__syncthreads();
dev_ns_epsilon[cellidx] = p;
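The kernel writes value - dp_dz into the wall-height cells: the prescribed
wall pressure minus the weight of one cell of fluid, matching the
p_iz+1 = p_iz - rho_f*g*dz relation in the comment above. A sketch of that
pattern, with nx, ny and the row-major indexing standing in for sphere's
own grid helpers:

    typedef double Float;

    // Each thread handles one (x,y) column; only the cell layer at the
    // wall height wall_iz is written.
    __global__ void setEpsilonAtTopWallSketch(
        Float* __restrict__ dev_ns_epsilon,
        const unsigned int nx, const unsigned int ny,
        const unsigned int wall_iz,
        const Float value,   // prescribed pressure at the wall [Pa]
        const Float dp_dz)   // hydrostatic increment per cell [Pa]
    {
        const unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
        const unsigned int y = blockDim.y*blockIdx.y + threadIdx.y;
        if (x < nx && y < ny) {
            // p_{iz+1} = p_iz - rho_f*g*dz: wall pressure minus the
            // weight of one cell of fluid
            const Float p = value - dp_dz;
            dev_ns_epsilon[(wall_iz*ny + y)*nx + x] = p;
        }
    }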
@@ -2669,7 +2670,8 @@ __global__ void jacobiIterationNS(
const Float* __restrict__ dev_ns_f,
const int bc_bot,
const int bc_top,
- const Float theta)
+ const Float theta,
+ const unsigned int wall0_iz)
{
// 3D thread index
const unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
@@ -2738,6 +2740,9 @@ __global__ void jacobiIterationNS(
+ dxdx*dydy*(e_zn + e_zp))
/(2.0*(dxdx*dydy + dxdx*dzdz + dydy*dzdz));
+ if (z == wall0_iz)
+ e_new = e;
+
// New value of epsilon in 1D update
//const Float e_new = (e_zp + e_zn - dz*dz*f)/2.0;
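The new guard makes the wall layer a Dirichlet cell inside the solver: the
Jacobi update leaves the prescribed epsilon untouched where z == wall0_iz.
The sentinel wall0_iz = 10000000 set in device.cu presumably keeps the
guard from ever matching when no dynamic top wall exists. A 1D sketch of
the guarded update, following the commented 1D form above (names are
illustrative, not sphere's):

    typedef double Float;

    __global__ void jacobi1D(
        const Float* __restrict__ e,  // current epsilon
        Float* __restrict__ e_new,    // updated epsilon
        const Float* __restrict__ f,  // forcing term
        const unsigned int nz,
        const unsigned int wall_iz,   // index of the dynamic top wall cell
        const Float dz)
    {
        const unsigned int z = blockDim.x*blockIdx.x + threadIdx.x;
        if (z > 0 && z + 1 < nz) {
            if (z == wall_iz)
                e_new[z] = e[z]; // Dirichlet: keep the prescribed wall value
            else
                e_new[z] = (e[z+1] + e[z-1] - dz*dz*f[z])/2.0;
        }
    }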