deal.II version 9.7.0
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
Loading...
Searching...
No Matches
step-86.h
Go to the documentation of this file.
1 0.0,
797 *   /* end time */ 1.0,
798 *   /* initial time step */ 0.025)
799 *   , initial_global_refinement(5)
800 *   , max_delta_refinement_level(2)
801 *   , mesh_adaptation_frequency(0)
802 *   , right_hand_side_function("/Heat Equation/Right hand side")
803 *   , initial_value_function("/Heat Equation/Initial value")
804 *   , boundary_values_function("/Heat Equation/Boundary values")
805 *   {
806 *   enter_subsection("Time stepper");
807 *   {
808 *   enter_my_subsection(this->prm);
809 *   {
810 *   time_stepper_data.add_parameters(this->prm);
811 *   }
812 *   leave_my_subsection(this->prm);
813 *   }
814 *   leave_subsection();
815 *  
816 *   add_parameter("Initial global refinement",
817 *   initial_global_refinement,
818 *   "Number of times the mesh is refined globally before "
819 *   "starting the time stepping.");
820 *   add_parameter("Maximum delta refinement level",
821 *   max_delta_refinement_level,
822 *   "Maximum number of local refinement levels.");
823 *   add_parameter("Mesh adaptation frequency",
824 *   mesh_adaptation_frequency,
825 *   "When to adapt the mesh.");
826 *   }
827 *  
828 *  
829 *  
830 * @endcode
831 *
832 *
833 * <a name="step_86-TheHeatEquationsetup_systemfunction"></a>
834 * <h4>The HeatEquation::setup_system() function</h4>
835 *
836
837 *
838 * This function is not very different from what we do in many other programs
839 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
840 * information about them, build constraint objects (recalling that we
841 * put hanging node constraints into their separate object), and then
842 * also build an AffineConstraint object that contains both the hanging
843 * node constraints as well as constraints corresponding to zero Dirichlet
844 * boundary conditions. This last object is needed since we impose the
845 * constraints through algebraic equations. While technically it would be
846 * possible to use the time derivative of the boundary function as a boundary
847 * conditions for the time derivative of the solution, this is not done here.
848 * Instead, we impose the boundary conditions through algebraic equations, and
849 * therefore the time derivative of the boundary conditions is not part of
850 * the algebraic system, and we need zero boundary conditions on the time
851 * derivative of the solution when computing the residual. We use the
852 * `homogeneous_constraints` object for this purpose.
853 *
854
855 *
856 * Note one detail here: The function
857 * AffineConstraints::make_consistent_in_parallel() might expand the
858 * underlying IndexSet for locally stored lines of the
859 * `hanging_node_constraints` object depending on how constraints are set up
860 * in parallel. Thus, at the point where we want to create a second affine
861 * constraints object that gets the information from the hanging node
862 * constraints, we need to make sure to use the local lines stored in the
863 * hanging node constraints, not the `locally_relevant_dofs` that object was
864 * originally initialized to. If this is not respected, errors might appear
865 * during the course of the simulation.
866 *
867
868 *
869 * Finally, we create the actual non-homogeneous `current_constraints` by
870 * calling `update_current_constraints()`. These are also used during the
871 * assembly and during the residual evaluation.
872 *
873 * @code
874 *   template <int dim>
875 *   void HeatEquation<dim>::setup_system(const double time)
876 *   {
877 *   TimerOutput::Scope t(computing_timer, "setup system");
878 *  
879 *   dof_handler.distribute_dofs(fe);
880 *   pcout << std::endl
881 *   << "Number of active cells: " << triangulation.n_active_cells()
882 *   << std::endl
883 *   << "Number of degrees of freedom: " << dof_handler.n_dofs()
884 *   << std::endl
885 *   << std::endl;
886 *  
887 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
888 *   locally_relevant_dofs =
890 *  
891 *  
892 *   hanging_node_constraints.clear();
893 *   hanging_node_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
895 *   hanging_node_constraints);
896 *   hanging_node_constraints.make_consistent_in_parallel(locally_owned_dofs,
897 *   locally_relevant_dofs,
898 *   mpi_communicator);
899 *   hanging_node_constraints.close();
900 *  
901 *  
902 *   homogeneous_constraints.clear();
903 *   homogeneous_constraints.reinit(locally_owned_dofs,
904 *   hanging_node_constraints.get_local_lines());
905 *   homogeneous_constraints.merge(hanging_node_constraints);
907 *   0,
909 *   homogeneous_constraints);
910 *   homogeneous_constraints.make_consistent_in_parallel(
911 *   locally_owned_dofs,
912 *   hanging_node_constraints.get_local_lines(),
913 *   mpi_communicator);
914 *   homogeneous_constraints.close();
915 *  
916 *  
917 *   update_current_constraints(time);
918 *  
919 *  
920 * @endcode
921 *
922 * The final block of code resets and initializes the matrix object with
923 * the appropriate sparsity pattern. Recall that we do not store solution
924 * vectors in this class (the time integrator object does that internally)
925 * and so do not have to resize and initialize them either.
926 *
927 * @code
928 *   DynamicSparsityPattern dsp(locally_relevant_dofs);
929 *   DoFTools::make_sparsity_pattern(dof_handler,
930 *   dsp,
931 *   homogeneous_constraints,
932 *   false);
934 *   locally_owned_dofs,
935 *   mpi_communicator,
936 *   locally_relevant_dofs);
937 *  
938 *   jacobian_matrix.reinit(locally_owned_dofs,
939 *   locally_owned_dofs,
940 *   dsp,
941 *   mpi_communicator);
942 *   }
943 *  
944 *  
945 * @endcode
946 *
947 *
948 * <a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
949 * <h4>The HeatEquation::output_results() function</h4>
950 *
951
952 *
953 * This function is called from the "monitor" function that is called in turn
954 * by the time stepper in each time step. We use it to write the solution to a
955 * file, and provide graphical output through paraview or visit. We also write
956 * a pvd file, which groups all metadata about the `.vtu` files into a single
957 * file that can be used to load the full time dependent solution in paraview.
958 *
959 * @code
960 *   template <int dim>
961 *   void HeatEquation<dim>::output_results(const double time,
962 *   const unsigned int timestep_number,
963 *   const PETScWrappers::MPI::Vector &y)
964 *   {
965 *   TimerOutput::Scope t(computing_timer, "output results");
966 *  
967 *   DataOut<dim> data_out;
968 *   data_out.attach_dof_handler(dof_handler);
969 *   data_out.add_data_vector(y, "U");
970 *   data_out.build_patches();
971 *  
972 *   data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
973 *  
974 *   const std::string filename =
975 *   "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
976 *   data_out.write_vtu_in_parallel(filename, mpi_communicator);
977 *  
978 *   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
979 *   {
980 *   static std::vector<std::pair<double, std::string>> times_and_names;
981 *   times_and_names.emplace_back(time, filename);
982 *  
983 *   std::ofstream pvd_output("solution.pvd");
984 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
985 *   }
986 *   }
987 *  
988 *  
989 *  
990 * @endcode
991 *
992 *
993 * <a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
994 * <h4>The HeatEquation::implicit_function() function</h4>
995 *
996
997 *
998 * As discussed in the introduction, we describe the ODE system to the time
999 * stepper via its residual,
1000 * @f[
1001 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1002 * @f]
1003 * The following function computes it, given vectors for @f$U,\dot U@f$ that
1004 * we will denote by `y` and `y_dot` because that's how they are called
1005 * in the documentation of the PETScWrappers::TimeStepper class.
1006 *
1007
1008 *
1009 * At the top of the function, we do the usual set up when computing
1010 * integrals. We face two minor difficulties here: the first is that
1011 * the `y` and `y_dot` vectors we get as input are read only, but we
1012 * need to make sure they satisfy the correct boundary conditions and so
1013 * have to set elements in these vectors. The second is that we need to
1014 * compute the residual, and therefore in general we need to evaluate solution
1015 * values and gradients inside locally owned cells, and for this need access
1016 * to degrees of freedom which may be owned by neighboring processors. To
1017 * address these issues, we create (non-ghosted) writable copies of the input
1018 * vectors, apply boundary conditions and hanging node current_constraints;
1019 * and then copy these vectors to ghosted vectors before we can do anything
1020 * sensible with them.
1021 *
1022 * @code
1023 *   template <int dim>
1024 *   void
1025 *   HeatEquation<dim>::implicit_function(const double time,
1026 *   const PETScWrappers::MPI::Vector &y,
1027 *   const PETScWrappers::MPI::Vector &y_dot,
1028 *   PETScWrappers::MPI::Vector &residual)
1029 *   {
1030 *   TimerOutput::Scope t(computing_timer, "implicit function");
1031 *  
1032 *   PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
1033 *   mpi_communicator);
1034 *   PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
1035 *   mpi_communicator);
1036 *   tmp_solution = y;
1037 *   tmp_solution_dot = y_dot;
1038 *  
1039 *   update_current_constraints(time);
1040 *   current_constraints.distribute(tmp_solution);
1041 *   homogeneous_constraints.distribute(tmp_solution_dot);
1042 *  
1043 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1044 *   locally_relevant_dofs,
1045 *   mpi_communicator);
1046 *   PETScWrappers::MPI::Vector locally_relevant_solution_dot(
1047 *   locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
1048 *   locally_relevant_solution = tmp_solution;
1049 *   locally_relevant_solution_dot = tmp_solution_dot;
1050 *  
1051 *  
1052 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1053 *   FEValues<dim> fe_values(fe,
1054 *   quadrature_formula,
1055 *   update_values | update_gradients |
1056 *   update_quadrature_points | update_JxW_values);
1057 *  
1058 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1059 *   const unsigned int n_q_points = quadrature_formula.size();
1060 *  
1061 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1062 *  
1063 *   std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
1064 *   std::vector<double> solution_dot_values(n_q_points);
1065 *  
1066 *   Vector<double> cell_residual(dofs_per_cell);
1067 *  
1068 *   right_hand_side_function.set_time(time);
1069 *  
1070 * @endcode
1071 *
1072 * Now for computing the actual residual. Recall that we want to compute the
1073 * vector
1074 * @f[
1075 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1076 * @f]
1077 * We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
1078 * is not efficient. Instead, recall (by writing out how the elements of
1079 * @f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
1080 * @f$i@f$th element of the residual vector is given by
1081 * @f{align*}{
1082 * R(t,U,\dot U)_i
1083 * &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
1084 * \frac{\partial U_j(t)}{\partial t}
1085 * + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1086 * \varphi_j(\mathbf x, t) U_j(t)
1087 * - \int_\Omega \varphi_i f(\mathbf x, t)
1088 * \\ &=
1089 * \int_\Omega \varphi_i(\mathbf x, t) \frac{\partial u_h(\mathbf x, t)}{\partial t}
1090 * + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1091 * u_h(\mathbf x, t)
1092 * - \int_\Omega \varphi_i f(\mathbf x, t).
1093 * @f}
1094 * We can compute these integrals efficiently by breaking them up into
1095 * a sum over all cells and then applying quadrature. For the integrand,
1096 * we need to evaluate the solution and its gradient at the quadrature
1097 * points within each locally owned cell, and for this, we need also
1098 * access to degrees of freedom that may be owned by neighboring
1099 * processors. We therefore use the locally_relevant_solution and
1100 * locally_relevant_solution_dot vectors.
1101 *
1102 * @code
1103 *   residual = 0;
1104 *   for (const auto &cell : dof_handler.active_cell_iterators())
1105 *   if (cell->is_locally_owned())
1106 *   {
1107 *   fe_values.reinit(cell);
1108 *  
1109 *   fe_values.get_function_gradients(locally_relevant_solution,
1110 *   solution_gradients);
1111 *   fe_values.get_function_values(locally_relevant_solution_dot,
1112 *   solution_dot_values);
1113 *  
1114 *   cell->get_dof_indices(local_dof_indices);
1115 *  
1116 *   cell_residual = 0;
1117 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1118 *   for (const unsigned int i : fe_values.dof_indices())
1119 *   {
1120 *   cell_residual(i) +=
1121 *   (fe_values.shape_value(i, q) * // [phi_i(x_q) *
1122 *   solution_dot_values[q] // u(x_q)
1123 *   + // +
1124 *   fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1125 *   solution_gradients[q] // grad u(x_q)
1126 *   - // -
1127 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1128 *   right_hand_side_function.value(
1129 *   fe_values.quadrature_point(q))) // f(x_q)]
1130 *   * fe_values.JxW(q); // * dx
1131 *   }
1132 *   current_constraints.distribute_local_to_global(cell_residual,
1133 *   local_dof_indices,
1134 *   residual);
1135 *   }
1136 *   residual.compress(VectorOperation::add);
1137 *  
1138 * @endcode
1139 *
1140 * The end result of the operations above is a vector that contains the
1141 * residual vector, having taken into account the constraints due to
1142 * hanging nodes and Dirichlet boundary conditions (by virtue of having
1143 * used `current_constraints.distribute_local_to_global()` to add the
1144 * local contributions to the global vector). At the end of the day, the
1145 * residual vector @f$r@f$ will be used in the solution of linear systems
1146 * of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
1147 * below. We want to achieve that for algebraic components, the algebraic
1148 * components of @f$z@f$ have predictable values that achieve the purposes
1149 * discussed in the following. We do this by ensuring that the entries
1150 * corresponding to algebraic components in the residual @f$r@f$ have specific
1151 * values, and then we will do the same in the next function for the
1152 * matrix; for this, you will have to know that the rows and columns
1153 * of the matrix corresponding to constrained entries are zero with the
1154 * exception of the diagonal entries. We will manually set that diagonal
1155 * entry to one, and so @f$z_i=r_i@f$ for algebraic components.
1156 *
1157
1158 *
1159 * From the point of view of the residual vector, if the input `y`
1160 * vector does not contain the correct values on constrained degrees of
1161 * freedom (hanging nodes or boundary conditions), we need to communicate
1162 * this to the time stepper, and we do so by setting the residual to the
1163 * actual difference between the input `y` vector and our local copy of
1164 * it, in which we have applied the constraints (see the top of the
1165 * function where we called `current_constraints.distribute(tmp_solution)`
1166 * and a similar operation on the time derivative). Since we have made a
1167 * copy of the input vector for this purpose, we use it to compute the
1168 * residual value. However, there is a difference between hanging nodes
1169 * constraints and boundary conditions: we do not want to make hanging node
1170 * constraints actually depend on their dependent degrees of freedom, since
1171 * this would imply that we are actually solving for the dependent degrees
1172 * of freedom. This is not what we are actually doing, however, since
1173 * hanging nodes are not actually solved for. They are eliminated from the
1174 * system by the call to AffineConstraints::distribute_local_to_global()
1175 * above. From the point of view of the Jacobian matrix, we are effectively
1176 * setting hanging nodes to an artificial value (usually zero), and we
1177 * simply want to make sure that we solve for those degrees of freedom a
1178 * posteriori, by calling the function AffineConstraints::distribute().
1179 *
1180
1181 *
1182 * Here we therefore check that the residual is equal to the input value on
1183 * the constrained dofs corresponding to hanging nodes (i.e., those for
1184 * which the lines of the `current_constraints` contain at least one other
1185 * entry), and to the difference between the input vector and the actual
1186 * solution on those constraints that correspond to boundary conditions.
1187 *
1188 * @code
1189 *   for (const auto &c : current_constraints.get_lines())
1190 *   if (locally_owned_dofs.is_element(c.index))
1191 *   {
1192 *   if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
1193 *   residual[c.index] = y[c.index] - tmp_solution[c.index];
1194 *   else /* has dependencies -> a hanging node */
1195 *   residual[c.index] = y[c.index];
1196 *   }
1197 *   residual.compress(VectorOperation::insert);
1198 *   }
1199 *  
1200 *  
1201 * @endcode
1202 *
1203 *
1204 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1205 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1206 *
1207
1208 *
1209 * The next operation is to compute the "Jacobian", which PETSc TS defines
1210 * as the matrix
1211 * @f[
1212 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial
1213 * R}{\partial \dot y}
1214 * @f]
1215 * which, for the current linear problem, is simply
1216 * @f[
1217 * J_\alpha = A + \alpha M
1218 * @f]
1219 * and which is in particular independent of time and the current solution
1220 * vectors @f$y@f$ and @f$\dot y@f$.
1221 *
1222
1223 *
1224 * Having seen the assembly of matrices before, there is little that should
1225 * surprise you in the actual assembly here:
1226 *
1227 * @code
1228 *   template <int dim>
1229 *   void HeatEquation<dim>::assemble_implicit_jacobian(
1230 *   const double /* time */,
1231 *   const PETScWrappers::MPI::Vector & /* y */,
1232 *   const PETScWrappers::MPI::Vector & /* y_dot */,
1233 *   const double alpha)
1234 *   {
1235 *   TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");
1236 *  
1237 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1238 *   FEValues<dim> fe_values(fe,
1239 *   quadrature_formula,
1240 *   update_values | update_gradients |
1241 *   update_quadrature_points | update_JxW_values);
1242 *  
1243 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1244 *  
1245 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1246 *  
1247 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1248 *  
1249 *   jacobian_matrix = 0;
1250 *   for (const auto &cell : dof_handler.active_cell_iterators())
1251 *   if (cell->is_locally_owned())
1252 *   {
1253 *   fe_values.reinit(cell);
1254 *  
1255 *   cell->get_dof_indices(local_dof_indices);
1256 *  
1257 *   cell_matrix = 0;
1258 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1259 *   for (const unsigned int i : fe_values.dof_indices())
1260 *   for (const unsigned int j : fe_values.dof_indices())
1261 *   {
1262 *   cell_matrix(i, j) +=
1263 *   (fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1264 *   fe_values.shape_grad(j, q) // grad phi_j(x_q)
1265 *   + alpha *
1266 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1267 *   fe_values.shape_value(j, q) // phi_j(x_q)
1268 *   ) *
1269 *   fe_values.JxW(q); // * dx
1270 *   }
1271 *   current_constraints.distribute_local_to_global(cell_matrix,
1272 *   local_dof_indices,
1273 *   jacobian_matrix);
1274 *   }
1275 *   jacobian_matrix.compress(VectorOperation::add);
1276 *  
1277 * @endcode
1278 *
1279 * The only interesting part is the following. Recall that we modified the
1280 * residual vector's entries corresponding to the algebraic components of
1281 * the solution in the previous function. The outcome of calling
1282 * `current_constraints.distribute_local_to_global()` a few lines
1283 * above is that the global matrix has zero rows and columns for the
1284 * algebraic (constrained) components of the solution; the function
1285 * puts a value on the diagonal that is nonzero and has about the
1286 * same size as the remaining diagonal entries of the matrix. What
1287 * this diagonal value is is unknown to us -- in other cases where
1288 * we call `current_constraints.distribute_local_to_global()` on
1289 * both the left side matrix and the right side vector, as in most
1290 * other tutorial programs, the matrix diagonal entries and right
1291 * hand side values are chosen in such a way that the result of
1292 * solving a linear system is what we want it to be, but the scaling
1293 * is done automatically.
1294 *
1295
1296 *
1297 * This is not good enough for us here, because we are building the
1298 * right hand side independently from the matrix in different functions.
1299 * Thus, for any constrained degree of freedom, we set the diagonal of the
1300 * Jacobian to be one. This leaves the Jacobian matrix invertible,
1301 * consistent with what the time stepper expects, and it also makes sure
1302 * that if we did not make a mistake in the residual and/or in the Jacobian
1303 * matrix, then asking the time stepper to check the Jacobian with a finite
1304 * difference method will produce the correct result. This can be activated
1305 * at run time via passing the `-snes_test_jacobian` option on the command
1306 * line.
1307 *
1308 * @code
1309 *   for (const auto &c : current_constraints.get_lines())
1310 *   jacobian_matrix.set(c.index, c.index, 1.0);
1311 *   jacobian_matrix.compress(VectorOperation::insert);
1312 *   }
1313 *  
1314 *  
1315 * @endcode
1316 *
1317 *
1318 * <a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1319 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1320 *
1321
1322 *
1323 * This is the function that actually solves the linear system with the
1324 * Jacobian matrix we have previously built (in a call to the previous
1325 * function during the current time step or another earlier one -- time
1326 * steppers are quite sophisticated in determining internally whether it is
1327 * necessary to update the Jacobian matrix, or whether one can reuse it for
1328 * another time step without rebuilding it; this is similar to how one can
1329 * re-use the Newton matrix for several Newton steps, see for example the
1330 * discussion in @ref step_77 "step-77"). We could in principle not provide this function to
1331 * the time stepper, and instead select a specific solver on the command line
1332 * by using the `-ksp_*` options of PETSc. However, by providing this
1333 * function, we can use a specific solver and preconditioner for the linear
1334 * system, and still have the possibility to change them on the command line.
1335 *
1336
1337 *
1338 * Providing a specific solver is more in line with the way we usually do
1339 * things in other deal.II examples, while letting PETSc choose a generic
1340 * solver, and changing it on the command line via `-ksp_type` is more in line
1341 * with the way PETSc is usually used, and it can be a convenient approach
1342 * when we are experimenting to find an optimal solver for our problem. Both
1343 * options are available here, since we can still change both the solver and
1344 * the preconditioner on the command line via `-user_ksp_type` and
1345 * `-user_pc_type` options.
1346 *
1347
1348 *
1349 * In any case, recall that the Jacobian we built in the previous function is
1350 * always of the form
1351 * @f[
1352 * J_\alpha = \alpha M + A
1353 * @f]
1354 * where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is symmetric and
1355 * positive definite; @f$A@f$ is symmetric and at least positive semidefinite;
1356 * @f$\alpha> 0@f$. As a consequence, the Jacobian matrix is a symmetric and
1357 * positive definite matrix, which we can efficiently solve with the Conjugate
1358 * Gradient method, along with either SSOR or (if available) the algebraic
1359 * multigrid implementation provided by PETSc (via the Hypre package) as
1360 * preconditioner. In practice, if you wanted to solve "real" problems, one
1361 * would spend some time finding which preconditioner is optimal, perhaps
1362 * using PETSc's ability to read solver and preconditioner choices from the
1363 * command line. But this is not the focus of this tutorial program, and so
1364 * we just go with the following:
1365 *
1366 * @code
1367 *   template <int dim>
1368 *   void
1369 *   HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
1370 *   PETScWrappers::MPI::Vector &dst)
1371 *   {
1372 *   TimerOutput::Scope t(computing_timer, "solve with Jacobian");
1373 *  
1374 *   #if defined(PETSC_HAVE_HYPRE)
1375 *   PETScWrappers::PreconditionBoomerAMG preconditioner;
1376 *   preconditioner.initialize(jacobian_matrix);
1377 *   #else
1378 *   PETScWrappers::PreconditionSSOR preconditioner;
1379 *   preconditioner.initialize(
1380 *   jacobian_matrix, PETScWrappers::PreconditionSSOR::AdditionalData(1.0));
1381 *   #endif
1382 *  
1383 *   SolverControl solver_control(1000, 1e-8 * src.l2_norm());
1384 *   PETScWrappers::SolverCG cg(solver_control);
1385 *   cg.set_prefix("user_");
1386 *  
1387 *   cg.solve(jacobian_matrix, dst, src, preconditioner);
1388 *  
1389 *   pcout << " " << solver_control.last_step() << " linear iterations."
1390 *   << std::endl;
1391 *   }
1392 *  
1393 *  
1394 * @endcode
1395 *
1396 *
1397 * <a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1398 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1399 *
1400
1401 *
1402 * The next block of functions deals with mesh refinement. We split this
1403 * process up into a "decide whether and what you want to refine" and a
1404 * "please transfer these vectors from old to new mesh" phase, where the first
1405 * also deals with marking cells for refinement. (The decision whether or
1406 * not to refine is done in the lambda function that calls the current
1407 * function.)
1408 *
1409
1410 *
1411 * Breaking things into a "mark cells" function and into an "execute mesh
1412 * adaptation and transfer solution vectors" function is awkward, though
1413 * conceptually not difficult to understand. These two pieces of code
1414 * should really be part of the same function, as they are in @ref step_26 "step-26".
1415 * The issue is with what PETScWrappers::TimeStepper provides us with in these
1416 * callbacks. Specifically, the "decide whether and what you want to refine"
1417 * callback has access to the current solution, and so can evaluate
1418 * (spatial) error estimators to decide which cells to refine. The
1419 * second callback that transfers vectors from old to new mesh gets a bunch
1420 * of vectors, but without the semantic information on which of these is
1421 * the current solution vector. As a consequence, it cannot do the marking
1422 * of cells for refinement or coarsening, and we have to do that from the
1423 * first callback.
1424 *
1425
1426 *
1427 * In practice, however, the problem is minor. The first of these
1428 * two functions is essentially identical to the
1429 * first half of the corresponding function in @ref step_26 "step-26", with the only
1430 * difference that it uses the parallel::distributed::GridRefinement namespace
1431 * function instead of the serial one. Once again, we make sure that we never
1432 * fall below the minimum refinement level, and above the maximum one, that we
1433 * can select from the parameter file.
1434 *
1435 * @code
1436 *   template <int dim>
1437 *   void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1438 *   const PETScWrappers::MPI::Vector &y)
1439 *   {
1440 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1441 *   locally_relevant_dofs,
1442 *   mpi_communicator);
1443 *   locally_relevant_solution = y;
1444 *  
1445 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1446 *   KellyErrorEstimator<dim>::estimate(dof_handler,
1447 *   QGauss<dim - 1>(fe.degree + 1),
1448 *   {},
1449 *   locally_relevant_solution,
1450 *   estimated_error_per_cell);
1451 *  
1453 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
1454 *  
1455 *   const unsigned int max_grid_level =
1456 *   initial_global_refinement + max_delta_refinement_level;
1457 *   const unsigned int min_grid_level = initial_global_refinement;
1458 *  
1459 *   if (triangulation.n_levels() > max_grid_level)
1460 *   for (const auto &cell :
1461 *   triangulation.active_cell_iterators_on_level(max_grid_level))
1462 *   cell->clear_refine_flag();
1463 *   for (const auto &cell :
1464 *   triangulation.active_cell_iterators_on_level(min_grid_level))
1465 *   cell->clear_coarsen_flag();
1466 *   }
1467 *  
1468 *  
1469 * @endcode
1470 *
1471 *
1472 * <a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1473 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1474 *
1475
1476 *
1477 * The following function then is the second half of the corresponding function
1478 * in @ref step_26 "step-26". It is called by the time stepper whenever it requires to
1479 * transfer the solution and any intermediate stage vectors to a new mesh. We
1480 * must make sure that all input vectors are transformed into ghosted vectors
1481 * before the actual transfer is executed, and that we distribute the hanging
1482 * node constraints on the output vectors as soon as we have interpolated the
1483 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1484 * the vectors we transfer.
1485 *
1486
1487 *
1488 * We have no way to enforce boundary conditions at this stage, however. This
1489 * is because the various vectors may correspond to solutions at previous time
1490 * steps if the method used here is a multistep time integrator, and so may
1491 * correspond to different time points that we are not privy to.
1492 *
1493
1494 *
1495 * While this could be a problem if we used the values of the solution and of
1496 * the intermediate stages on the constrained degrees of freedom to compute
1497 * the errors, we do not do so. Instead, we compute the errors on the
1498 * differential equation, and ignore algebraic constraints; therefore we do not
1499 * need to guarantee that the boundary conditions are satisfied also in the
1500 * intermediate stages.
1501 *
1502
1503 *
1504 * We have at our disposal the hanging node current_constraints alone, though,
1505 * and therefore we enforce them on the output vectors, even if this is not
1506 * really needed.
1507 *
1508 * @code
1509 *   template <int dim>
1510 *   void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1511 *   const double time,
1512 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1513 *   std::vector<PETScWrappers::MPI::Vector> &all_out)
1514 *   {
1516 *   dof_handler);
1517 *  
1518 *   std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1519 *   std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1520 *   all_in.size());
1521 *   std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1522 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1523 *   {
1524 *   all_in_ghosted[i].reinit(locally_owned_dofs,
1525 *   locally_relevant_dofs,
1526 *   mpi_communicator);
1527 *   all_in_ghosted[i] = all_in[i];
1528 *   all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1529 *   }
1530 *  
1531 *   triangulation.prepare_coarsening_and_refinement();
1532 *   solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
1533 *   triangulation.execute_coarsening_and_refinement();
1534 *  
1535 *   setup_system(time);
1536 *  
1537 *   all_out.resize(all_in.size());
1538 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1539 *   {
1540 *   all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1541 *   all_out_ptr[i] = &all_out[i];
1542 *   }
1543 *   solution_trans.interpolate(all_out_ptr);
1544 *  
1545 *   for (PETScWrappers::MPI::Vector &v : all_out)
1546 *   hanging_node_constraints.distribute(v);
1547 *   }
1548 *  
1549 *  
1550 * @endcode
1551 *
1552 *
1553 * <a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1554 * <h4>The HeatEquation::update_current_constraints() function</h4>
1555 *
1556
1557 *
1558 * Since regenerating the constraints at each time step may be expensive, we
1559 * make sure that we only do so when the time changes. We track time change by
1560 * checking if the time of the boundary_values_function has changed, with
1561 * respect to the time of the last call to this function. This will work most
1562 * of the time, but not the very first time we call this function, since the
1563 * time then may be zero and the time of the `boundary_values_function` is
1564 * zero at construction time. We therefore also check the number of
1565 * constraints in `current_constraints`, and if these are empty, we regenerate
1566 * the constraints regardless of the time variable.
1567 *
1568 * @code
1569 *   template <int dim>
1570 *   void HeatEquation<dim>::update_current_constraints(const double time)
1571 *   {
1572 *   if (Utilities::MPI::sum(current_constraints.n_constraints(),
1573 *   mpi_communicator) == 0 ||
1574 *   time != boundary_values_function.get_time())
1575 *   {
1576 *   TimerOutput::Scope t(computing_timer, "update current constraints");
1577 *  
1578 *   boundary_values_function.set_time(time);
1579 *   current_constraints.clear();
1580 *   current_constraints.reinit(locally_owned_dofs,
1581 *   hanging_node_constraints.get_local_lines());
1582 *   current_constraints.merge(hanging_node_constraints);
1583 *   VectorTools::interpolate_boundary_values(dof_handler,
1584 *   0,
1585 *   boundary_values_function,
1586 *   current_constraints);
1587 *   current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1588 *   locally_relevant_dofs,
1589 *   mpi_communicator);
1590 *   current_constraints.close();
1591 *   }
1592 *   }
1593 *  
1594 *  
1595 * @endcode
1596 *
1597 *
1598 * <a name="step_86-TheHeatEquationrunfunction"></a>
1599 * <h4>The HeatEquation::run() function</h4>
1600 *
1601
1602 *
1603 * We have finally arrived at the main function of the class. At the top, it
1604 * creates the mesh and sets up the variables that make up the linear system:
1605 *
1606 * @code
1607 *   template <int dim>
1608 *   void HeatEquation<dim>::run()
1609 *   {
1610 *   GridGenerator::hyper_L(triangulation);
1611 *   triangulation.refine_global(initial_global_refinement);
1612 *  
1613 *   setup_system(/* time */ 0);
1614 *  
1615 * @endcode
1616 *
1617 * We then set up the time stepping object and associate the matrix we will
1618 * build whenever requested for both the Jacobian matrix (see the definition
1619 * above of what the "Jacobian" actually refers to) and for the matrix
1620 * that will be used as a preconditioner for the Jacobian.
1621 *
1622 * @code
1625 *   petsc_ts(time_stepper_data);
1626 *  
1627 *   petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
1628 *  
1629 *  
1630 * @endcode
1631 *
1632 * The real work setting up the time stepping object starts here. As
1633 * discussed in the introduction, the way the PETScWrappers::TimeStepper
1634 * class is used is by inverting control: At the end of this function, we
1635 * will call PETScWrappers::TimeStepper::solve() which internally will
1636 * run the loop over time steps, and at the appropriate places *call back*
1637 * into user code for whatever functionality is required. What we need to
1638 * do is to hook up these callback functions by assigning appropriate
1639 * lambda functions to member variables of the `petsc_ts` object.
1640 *
1641
1642 *
1643 * We start by creating lambda functions that provide information about
1644 * the "implicit function" (i.e., that part of the right hand side of the
1645 * ODE system that we want to treat implicitly -- which in our case is
1646 * the entire right hand side), a function that assembles the Jacobian
1647 * matrix, and a function that solves a linear system with the Jacobian.
1648 *
1649 * @code
1650 *   petsc_ts.implicit_function = [&](const double time,
1651 *   const PETScWrappers::MPI::Vector &y,
1652 *   const PETScWrappers::MPI::Vector &y_dot,
1653 *   PETScWrappers::MPI::Vector &res) {
1654 *   this->implicit_function(time, y, y_dot, res);
1655 *   };
1656 *  
1657 *   petsc_ts.setup_jacobian = [&](const double time,
1658 *   const PETScWrappers::MPI::Vector &y,
1659 *   const PETScWrappers::MPI::Vector &y_dot,
1660 *   const double alpha) {
1661 *   this->assemble_implicit_jacobian(time, y, y_dot, alpha);
1662 *   };
1663 *  
1664 *   petsc_ts.solve_with_jacobian = [&](const PETScWrappers::MPI::Vector &src,
1665 *   PETScWrappers::MPI::Vector &dst) {
1666 *   this->solve_with_jacobian(src, dst);
1667 *   };
1668 *  
1669 * @endcode
1670 *
1671 * The next two callbacks deal with identifying and setting variables
1672 * that are considered "algebraic" (rather than "differential"), i.e., for
1673 * which we know what values they are supposed to have rather than letting
1674 * their values be determined by the differential equation. We need to
1675 * instruct the time stepper to ignore these components when computing the
1676 * error in the residuals, and we do so by first providing a function that
1677 * returns an IndexSet with the indices of these algebraic components of the
1678 * solution vector (or rather, that subset of the locally-owned part of the
1679 * vector that is algebraic, in case we are running in parallel). This first
1680 * of the following two functions does that. Specifically, both nodes at
1681 * which Dirichlet boundary conditions are applied, and hanging nodes are
1682 * algebraically constrained. This function then returns a set of
1683 * indices that is initially empty (but knows about the size of the index
1684 * space) and which we then construct as the union of boundary and hanging
1685 * node indices.
1686 *
1687
1688 *
1689 * Following this, we then also need a function that, given a solution
1690 * vector `y` and the current time, sets the algebraic components of that
1691 * vector to their correct value. This comes down to ensuring that we have
1692 * up to date constraints in the `constraints` variable, and then applying
1693 * these constraints to the solution vector via
1694 * AffineConstraints::distribute(). (It is perhaps worth noting that we
1695 * *could* have achieved the same in `solve_with_jacobian()`. Whenever the
1696 * time stepper solves a linear system, it follows up the call to the solver
1697 * by calling the callback to set algebraic components correct. We could
1698 * also have put the calls to `update_current_constraints()` and
1699 * `distribute()` into the `solve_with_jacobian` function, but by not doing
1700 * so, we can also replace the `solve_with_jacobian` function with a call to
1701 * a PETSc solver, and we would still have the current_constraints correctly
1702 * applied to the solution vector.)
1703 *
1704 * @code
1705 *   petsc_ts.algebraic_components = [&]() {
1706 *   IndexSet algebraic_set(dof_handler.n_dofs());
1707 *   algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
1708 *   algebraic_set.add_indices(
1709 *   DoFTools::extract_hanging_node_dofs(dof_handler));
1710 *   return algebraic_set;
1711 *   };
1712 *  
1713 *   petsc_ts.update_constrained_components =
1714 *   [&](const double time, PETScWrappers::MPI::Vector &y) {
1715 *   TimerOutput::Scope t(computing_timer, "set algebraic components");
1716 *   update_current_constraints(time);
1717 *   current_constraints.distribute(y);
1718 *   };
1719 *  
1720 *  
1721 * @endcode
1722 *
1723 * The next two callbacks relate to mesh refinement. As discussed in the
1724 * introduction, PETScWrappers::TimeStepper knows how to deal with the
1725 * situation where we want to change the mesh. All we have to provide
1726 * is a callback that returns `true` if we are at a point where we want
1727 * to refine the mesh (and `false` otherwise) and that if we want to
1728 * do mesh refinement does some prep work for that in the form of
1729 * calling the `prepare_for_coarsening_and_refinement` function.
1730 *
1731
1732 *
1733 * If the first callback below returns `true`, then PETSc TS will
1734 * do some clean-up operations, and call the second of the
1735 * callback functions
1736 * (`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a
1737 * collection of vectors that need to be interpolated from the old
1738 * to the new mesh. This may include the current solution, perhaps
1739 * the current time derivative of the solution, and in the case of
1740 * [multistep time
1741 * integrators](https://en.wikipedia.org/wiki/Linear_multistep_method) also
1742 * the solutions of some previous time steps. We hand all of these over to
1743 * the `interpolate()` member function of this class.
1744 *
1745 * @code
1746 *   petsc_ts.decide_and_prepare_for_remeshing =
1747 *   [&](const double /* time */,
1748 *   const unsigned int step_number,
1749 *   const PETScWrappers::MPI::Vector &y) -> bool {
1750 *   if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
1751 *   step_number % this->mesh_adaptation_frequency == 0)
1752 *   {
1753 *   pcout << std::endl << "Adapting the mesh..." << std::endl;
1754 *   this->prepare_for_coarsening_and_refinement(y);
1755 *   return true;
1756 *   }
1757 *   else
1758 *   return false;
1759 *   };
1760 *  
1761 *   petsc_ts.transfer_solution_vectors_to_new_mesh =
1762 *   [&](const double time,
1763 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1764 *   std::vector<PETScWrappers::MPI::Vector> &all_out) {
1765 *   this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
1766 *   };
1767 *  
1768 * @endcode
1769 *
1770 * The final callback is a "monitor" that is called in each
1771 * time step. Here we use it to create a graphical output. Perhaps a better
1772 * scheme would output the solution at fixed time intervals, rather
1773 * than in every time step, but this is not the main point of
1774 * this program and so we go with the easy approach:
1775 *
1776 * @code
1777 *   petsc_ts.monitor = [&](const double time,
1778 *   const PETScWrappers::MPI::Vector &y,
1779 *   const unsigned int step_number) {
1780 *   pcout << "Time step " << step_number << " at t=" << time << std::endl;
1781 *   this->output_results(time, step_number, y);
1782 *   };
1783 *  
1784 *  
1785 * @endcode
1786 *
1787 * With all of this out of the way, the rest of the function is
1788 * anticlimactic: We just have to initialize the solution vector with
1789 * the initial conditions and call the function that does the time
1790 * stepping, and everything else will happen automatically:
1791 *
1792 * @code
1793 *   PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
1794 *   VectorTools::interpolate(dof_handler, initial_value_function, solution);
1795 *  
1796 *   petsc_ts.solve(solution);
1797 *   }
1798 *   } // namespace Step86
1799 *  
1800 *  
1801 * @endcode
1802 *
1803 *
1804 * <a name="step_86-Themainfunction"></a>
1805 * <h3>The main() function</h3>
1806 *
1807
1808 *
1809 * The rest of the program is as it always looks. We read run-time parameters
1810 * from an input file via the ParameterAcceptor class in the same way as
1811 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1812 *
1813 * @code
1814 *   int main(int argc, char **argv)
1815 *   {
1816 *   try
1817 *   {
1818 *   using namespace Step86;
1819 *  
1820 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1821 *   HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1822 *  
1823 *   const std::string input_filename =
1824 *   (argc > 1 ? argv[1] : "heat_equation.prm");
1825 *   ParameterAcceptor::initialize(input_filename, "heat_equation_used.prm");
1826 *   heat_equation_solver.run();
1827 *   }
1828 *   catch (std::exception &exc)
1829 *   {
1830 *   std::cerr << std::endl
1831 *   << std::endl
1832 *   << "----------------------------------------------------"
1833 *   << std::endl;
1834 *   std::cerr << "Exception on processing: " << std::endl
1835 *   << exc.what() << std::endl
1836 *   << "Aborting!" << std::endl
1837 *   << "----------------------------------------------------"
1838 *   << std::endl;
1839 *  
1840 *   return 1;
1841 *   }
1842 *   catch (...)
1843 *   {
1844 *   std::cerr << std::endl
1845 *   << std::endl
1846 *   << "----------------------------------------------------"
1847 *   << std::endl;
1848 *   std::cerr << "Unknown exception!" << std::endl
1849 *   << "Aborting!" << std::endl
1850 *   << "----------------------------------------------------"
1851 *   << std::endl;
1852 *   return 1;
1853 *   }
1854 *  
1855 *   return 0;
1856 *   }
1857 * @endcode
1858<a name="step_86-Results"></a><h1>Results</h1>
1859
1860
1861When you run this program with the input file as is, you get output as follows:
1862@code
1863
1864Number of active cells: 768
1865Number of degrees of freedom: 833
1866
1867Time step 0 at t=0
1868 5 linear iterations.
1869 8 linear iterations.
1870Time step 1 at t=0.025
1871 6 linear iterations.
1872Time step 2 at t=0.05
1873 5 linear iterations.
1874 8 linear iterations.
1875Time step 3 at t=0.075
1876 6 linear iterations.
1877Time step 4 at t=0.1
1878 6 linear iterations.
1879Time step 5 at t=0.125
1880 6 linear iterations.
1881Time step 6 at t=0.15
1882 6 linear iterations.
1883Time step 7 at t=0.175
1884 6 linear iterations.
1885Time step 8 at t=0.2
1886 6 linear iterations.
1887Time step 9 at t=0.225
1888 6 linear iterations.
1889
1890Adapting the mesh...
1891
1892Number of active cells: 1050
1893Number of degrees of freedom: 1155
1894
1895Time step 10 at t=0.25
1896 5 linear iterations.
1897 8 linear iterations.
1898Time step 11 at t=0.275
1899 5 linear iterations.
1900 7 linear iterations.
1901
1902[...]
1903
1904Time step 195 at t=4.875
1905 6 linear iterations.
1906Time step 196 at t=4.9
1907 6 linear iterations.
1908Time step 197 at t=4.925
1909 6 linear iterations.
1910Time step 198 at t=4.95
1911 6 linear iterations.
1912Time step 199 at t=4.975
1913 5 linear iterations.
1914
1915Adapting the mesh...
1916
1917Number of active cells: 1380
1918Number of degrees of freedom: 1547
1919
1920Time step 200 at t=5
1921
1922
1923+---------------------------------------------+------------+------------+
1924| Total wallclock time elapsed since start | 43.2s | |
1925| | | |
1926| Section | no. calls | wall time | % of total |
1927+---------------------------------+-----------+------------+------------+
1928| assemble implicit Jacobian | 226 | 9.93s | 23% |
1929| implicit function | 426 | 16.2s | 37% |
1930| output results | 201 | 9.74s | 23% |
1931| set algebraic components | 200 | 0.0496s | 0.11% |
1932| setup system | 21 | 0.799s | 1.8% |
1933| solve with Jacobian | 226 | 0.56s | 1.3% |
1934| update current constraints | 201 | 1.53s | 3.5% |
1935+---------------------------------+-----------+------------+------------+
1936
1937@endcode
1938
1939We can generate output for this in the form of a video:
1940@htmlonly
1941<p align="center">
1942 <iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
1943 frameborder="0"
1944 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
1945 allowfullscreen></iframe>
1946 </p>
1947@endhtmlonly
1948The solution here is driven by boundary values (the initial conditions are zero,
1949and so is the right hand side of the equation). It takes a little bit of time
1950for the boundary values to diffuse into the domain, and so the temperature
1951(the solution of the heat equation) in the interior of the domain has a slight
1952lag compared to the temperature at the boundary.
1953
1954
1955The more interesting component of this program is how easy it is to play with
1956the details of the time stepping algorithm. Recall that the solution above
1957is controlled by the following parameters:
1958@code
1959 subsection Time stepper
1960 subsection Running parameters
1961 set final time = 5
1962 set initial step size = 0.025
1963 set initial time = 0
1964 set match final time = false
1965 set maximum number of steps = -1
1966 set options prefix =
1967 set solver type = beuler
1968 end
1969
1970 subsection Error control
1971 set absolute error tolerance = -1
1972 set adaptor type = none
1973 set ignore algebraic lte = true
1974 set maximum step size = -1
1975 set minimum step size = -1
1976 set relative error tolerance = -1
1977 end
1978 end
1979@endcode
1980Of particular interest for us here is to set the time stepping algorithm
1981and the adaptive time step control method. The latter is set to "none"
1982above, but there are
1983[several alternative choices](https://petsc.org/release/manualpages/TS/TSAdaptType/)
1984for this parameter. For example, we can set parameters as follows,
1985@code
1986 subsection Time stepper
1987 subsection Running parameters
1988 set final time = 5
1989 set initial step size = 0.025
1990 set initial time = 0
1991 set match final time = false
1992 set maximum number of steps = -1
1993 set options prefix =
1994 set solver type = bdf
1995 end
1996
1997 subsection Error control
1998 set absolute error tolerance = 1e-2
1999 set relative error tolerance = 1e-2
2000 set adaptor type = basic
2001 set ignore algebraic lte = true
2002 set maximum step size = 1
2003 set minimum step size = 0.01
2004 end
2005 end
2006@endcode
2007What we do here is set the initial time step size to 0.025, and choose relatively
2008large absolute and relative error tolerances of 0.01 for the time step size
2009adaptation algorithm (for which we choose "basic"). We ask PETSc TS to use a
2010[Backward Differentiation Formula (BDF)](https://en.wikipedia.org/wiki/Backward_differentiation_formula)
2011method, and we get the following as output:
2012@code
2013
2014===========================================
2015Number of active cells: 768
2016Number of degrees of freedom: 833
2017
2018Time step 0 at t=0
2019 5 linear iterations.
2020 5 linear iterations.
2021 5 linear iterations.
2022 4 linear iterations.
2023 6 linear iterations.
2024Time step 1 at t=0.01
2025 5 linear iterations.
2026 5 linear iterations.
2027Time step 2 at t=0.02
2028 5 linear iterations.
2029 5 linear iterations.
2030Time step 3 at t=0.03
2031 5 linear iterations.
2032 5 linear iterations.
2033Time step 4 at t=0.042574
2034 5 linear iterations.
2035 5 linear iterations.
2036Time step 5 at t=0.0588392
2037 5 linear iterations.
2038 5 linear iterations.
2039Time step 6 at t=0.0783573
2040 5 linear iterations.
2041 7 linear iterations.
2042 5 linear iterations.
2043 7 linear iterations.
2044Time step 7 at t=0.100456
2045 5 linear iterations.
2046 7 linear iterations.
2047 5 linear iterations.
2048 7 linear iterations.
2049Time step 8 at t=0.124982
2050 5 linear iterations.
2051 8 linear iterations.
2052 5 linear iterations.
2053 7 linear iterations.
2054Time step 9 at t=0.153156
2055 6 linear iterations.
2056 5 linear iterations.
2057
2058[...]
2059
2060Time step 206 at t=4.96911
2061 5 linear iterations.
2062 5 linear iterations.
2063Time step 207 at t=4.99398
2064 5 linear iterations.
2065 5 linear iterations.
2066Time step 208 at t=5.01723
2067
2068
2069+---------------------------------------------+------------+------------+
2070| Total wallclock time elapsed since start | 117s | |
2071| | | |
2072| Section | no. calls | wall time | % of total |
2073+---------------------------------+-----------+------------+------------+
2074| assemble implicit Jacobian | 593 | 35.6s | 31% |
2075| implicit function | 1101 | 56.3s | 48% |
2076| output results | 209 | 12.5s | 11% |
2077| set algebraic components | 508 | 0.156s | 0.13% |
2078| setup system | 21 | 1.11s | 0.95% |
2079| solve with Jacobian | 593 | 1.97s | 1.7% |
2080| update current constraints | 509 | 4.53s | 3.9% |
2081+---------------------------------+-----------+------------+------------+
2082@endcode
2083What is happening here is that apparently PETSc TS is not happy with our
2084choice of initial time step size, and after several linear solves has
2085reduced it to the minimum we allow it to, 0.01. The following time steps
2086then run at a time step size of 0.01 until it decides to make it slightly
2087larger again and (apparently) switches to a higher order method that
2088requires more linear solves per time step but allows for a larger time
2089step closer to our initial choice 0.025 again. It does not quite hit the
2090final time of @f$T=5@f$ with its time step choices, but we've got only
2091ourselves to blame for that by setting
2092@code
2093 set match final time = false
2094@endcode
2095in the input file. If hitting the end time exactly is important to us,
2096setting the flag to `true` resolves this issue.
2097
2098We can even reason why PETSc eventually chooses a time step of around
20990.025: The boundary values undergo a complete cosine cycle within 0.5
2100time units; we should expect that it takes around ten or twenty time
2101steps to resolve each period of a cycle to reasonable accuracy, and
2102this leads to the time step choice PETSc finds.
2103
2104Not all combinations of methods, time step adaptation algorithms, and
2105other parameters are valid, but the main messages from the experiment
2106above that you should take away are:
2107- It would undoubtedly be quite time consuming to implement many of the
2108 methods that PETSc TS offers for time stepping -- but with a program
2109 such as the one here, we don't need to: We can just select from the
2110 many methods PETSc TS already has.
2111- Adaptive time step control is difficult; adaptive choice of which
2112 method or order to choose is perhaps even more difficult. None of the
2113 time dependent programs that came before the current one (say, @ref step_23 "step-23",
2114 @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and a good number of others) have either.
2115 Moreover, while deal.II is good at spatial adaptation of meshes, it
2116 is not a library written by experts in time step adaptation, and so will
2117 likely not gain this ability either. But, again, it doesn't
2118 have to: We can rely on a library written by experts in that area.
2119
2120
2121
2122<a name="step-86-extensions"></a>
2123<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2124
2125
2126The program actually runs in parallel, even though we have not used
2127that above. Specifically, if you have configured deal.II to use MPI,
2128then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2129program with 8 processes.
2130
2131For the program as currently written (and in debug mode), this makes
2132little difference: It will run about twice as fast, but take about 8
2133times as much CPU time. That is because the problem is just so small:
2134Generally between 1000 and 2000 degrees of freedom. A good rule of
2135thumb is that programs really only benefit from parallel computing if
2136you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2137process*. But it is not difficult to adapt the program at hand here to
2138run with a much finer mesh, or perhaps in 3d, so that one is beyond
2139that limit and sees the benefits of parallel computing.
2140 *
2141 *
2142<a name="step_86-PlainProg"></a>
2143<h1> The plain program</h1>
2144@include "step-86.cc"
2145*/
void distribute(VectorType &vec) const
void attach_dof_handler(const DoFHandler< dim, spacedim > &)
static void estimate(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Quadrature< dim - 1 > &quadrature, const std::map< types::boundary_id, const Function< spacedim, Number > * > &neumann_bc, const ReadVector< Number > &solution, Vector< float > &error, const ComponentMask &component_mask={}, const Function< spacedim > *coefficients=nullptr, const unsigned int n_threads=numbers::invalid_unsigned_int, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id, const types::material_id material_id=numbers::invalid_material_id, const Strategy strategy=cell_diameter_over_24)
void initialize(const MatrixBase &matrix, const AdditionalData &additional_data=AdditionalData())
void initialize(const MatrixBase &matrix, const AdditionalData &additional_data=AdditionalData())
unsigned int solve(VectorType &y)
static void initialize(const std::string &filename="", const std::string &output_filename="", const ParameterHandler::OutputStyle output_style_for_output_filename=ParameterHandler::Short, ParameterHandler &prm=ParameterAcceptor::prm, const ParameterHandler::OutputStyle output_style_for_filename=ParameterHandler::DefaultStyle)
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
Definition loop.h:564
const bool IsBlockVector< VectorType >::value
void make_hanging_node_constraints(const DoFHandler< dim, spacedim > &dof_handler, AffineConstraints< number > &constraints)
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints={}, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
const Event initial
Definition event.cc:68
void write_pvd_record(std::ostream &out, const std::vector< std::pair< double, std::string > > &times_and_names)
IndexSet extract_boundary_dofs(const DoFHandler< dim, spacedim > &dof_handler, const ComponentMask &component_mask={}, const std::set< types::boundary_id > &boundary_ids={})
Definition dof_tools.cc:616
IndexSet extract_locally_relevant_dofs(const DoFHandler< dim, spacedim > &dof_handler)
IndexSet extract_hanging_node_dofs(const DoFHandler< dim, spacedim > &dof_handler)
void interpolate(const DoFHandler< dim, spacedim > &dof1, const InVector &u1, const DoFHandler< dim, spacedim > &dof2, OutVector &u2)
void hyper_L(Triangulation< dim > &tria, const double left=-1., const double right=1., const bool colorize=false)
void implicit_function(Triangulation< dim, 3 > &tria, const Function< 3 > &implicit_function, const CGALWrappers::AdditionalData< dim > &data=CGALWrappers::AdditionalData< dim >{}, const Point< 3 > &interior_point=Point< 3 >(), const double &outer_ball_radius=1.0)
void refine(Triangulation< dim, spacedim > &tria, const Vector< Number > &criteria, const double threshold, const unsigned int max_to_mark=numbers::invalid_unsigned_int)
@ matrix
Contents is actually a matrix.
@ diagonal
Matrix is diagonal.
constexpr char U
constexpr types::blas_int zero
constexpr types::blas_int one
Point< spacedim > point(const gp_Pnt &p, const double tolerance=1e-10)
Definition utilities.cc:193
SymmetricTensor< 2, dim, Number > e(const Tensor< 2, dim, Number > &F)
Tensor< 2, dim, Number > F(const Tensor< 2, dim, Number > &Grad_u)
void distribute_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet &locally_owned_rows, const MPI_Comm mpi_comm, const IndexSet &locally_relevant_rows)
VectorType::value_type * end(VectorType &V)
T sum(const T &t, const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
Definition mpi.cc:120
std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
Definition utilities.cc:466
void interpolate(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Function< spacedim, typename VectorType::value_type > &function, VectorType &vec, const ComponentMask &component_mask={}, const unsigned int level=numbers::invalid_unsigned_int)
void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask={})
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
bool check(const ConstraintKinds kind_in, const unsigned int dim)
int(& functions)(const void *v1, const void *v2)
void assemble(const MeshWorker::DoFInfoBox< dim, DOFINFO > &dinfo, A *assembler)
Definition loop.h:70
void refine_and_coarsen_fixed_fraction(::Triangulation< dim, spacedim > &tria, const ::Vector< Number > &criteria, const double top_fraction_of_error, const double bottom_fraction_of_error, const VectorTools::NormType norm_type=VectorTools::L1_norm)
STL namespace.