171 * The included deal.II header files are the same as in the other example
175 * #include <deal.II/base/function.h>
176 * #include <deal.II/base/logstream.h>
177 * #include <deal.II/base/quadrature_lib.h>
179 * #include <deal.II/dofs/dof_accessor.h>
180 * #include <deal.II/dofs/dof_handler.h>
181 * #include <deal.II/dofs/dof_tools.h>
183 * #include <deal.II/fe/fe_q.h>
184 * #include <deal.II/fe/fe_values.h>
186 * #include <deal.II/grid/grid_generator.h>
187 * #include <deal.II/grid/tria.h>
188 * #include <deal.II/grid/tria_accessor.h>
189 * #include <deal.II/grid/tria_iterator.h>
191 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
192 * #include <deal.II/lac/full_matrix.h>
193 * #include <deal.II/lac/precondition.h>
194 * #include <deal.II/lac/solver_cg.h>
195 * #include <deal.II/lac/sparse_matrix.h>
196 * #include <deal.II/lac/vector.h>
198 * #include <deal.II/numerics/data_out.h>
199 * #include <deal.II/numerics/matrix_tools.h>
200 * #include <deal.II/numerics/vector_tools.h>
203 * In addition to the deal.II header files, we include the preCICE API in order
204 * to obtain access to preCICE specific functionality
207 * #include <precice/precice.hpp>
210 * #include <iostream>
216 * Configuration parameters
220 * We set up a simple hard-coded
struct containing all names we need for
221 * external coupling. The
struct includes the name of the preCICE
222 * configuration file as well as the name of the simulation participant, the
223 * name of the coupling mesh and the name of the exchanged data. The last three
225 * names can also be found in the preCICE configuration file. For real application
225 * cases, these names are better handled by a parameter file.
228 *
struct CouplingParamters
230 *
const std::string config_file =
"precice-config.xml";
231 *
const std::string participant_name =
"laplace-solver";
232 *
const std::string mesh_name =
"dealii-mesh";
233 *
const std::string read_data_name =
"boundary-data";
243 * The Adapter
class handles all functionalities to couple the deal.II solver
244 * code to other solvers with preCICE, i.e., data structures are set up and all
245 * relevant information is passed to preCICE.
251 *
template <
int dim,
typename ParameterClass>
255 * Adapter(
const ParameterClass & parameters,
260 * std::map<types::global_dof_index, double> &boundary_data,
264 * read_data(
double relative_read_time,
265 * std::map<types::global_dof_index, double> &boundary_data);
268 *
advance(
const double computed_timestep_length);
272 *
public preCICE solver interface
275 * precice::Participant precice;
279 * Boundary ID of the deal.II triangulation, associated with the coupling
280 * interface. The variable is defined in the constructor of
this class and
281 * intentionally
public so that it can be used during the grid generation and
282 * system assembly. The only thing
one needs to make sure of is that
this ID is
283 * unique
for a particular triangulation.
286 *
const unsigned int dealii_boundary_interface_id;
291 * preCICE related initializations
292 * These variables are specified in and read from a parameter file, which is
293 * in
this simple tutorial program the CouplingParameter
struct already
294 * introduced in the beginning.
297 *
const std::string mesh_name;
298 *
const std::string read_data_name;
302 * The node IDs are filled by preCICE during the initialization and associated to
303 * the spherical vertices we pass to preCICE
306 *
int n_interface_nodes;
310 * DoF
IndexSet, containing relevant coupling DoF indices at the coupling
318 * Data containers which are passed to preCICE in an appropriate preCICE
322 * std::vector<int> interface_nodes_ids;
323 * std::vector<double> read_data_buffer;
327 * The
MPI rank and total number of
MPI ranks are required by preCICE when the
328 * Participant is created. Since
this tutorial runs only in
serial mode we
329 * define the variables manually in
this class instead of using the regular
339 * map
for Dirichlet boundary conditions
343 * format_precice_to_dealii(
344 * std::map<types::global_dof_index, double> &boundary_data)
const;
351 * In the constructor of the Adapter
class, we set up the preCICE
352 * Participant. We need to tell preCICE our name as participant of the
353 * simulation and the name of the preCICE configuration file. Both have already
354 * been specified in the CouplingParameter
class above. Thus, we pass the class
355 * directly to the constructor and read out all relevant information. As a
356 * second parameter, we need to specify the boundary ID of our triangulation,
357 * which is associated with the coupling interface.
360 *
template <
int dim,
typename ParameterClass>
361 * Adapter<dim, ParameterClass>::Adapter(
362 *
const ParameterClass & parameters,
364 * : precice(parameters.participant_name,
365 * parameters.config_file,
368 * , dealii_boundary_interface_id(deal_boundary_interface_id)
369 * , mesh_name(parameters.mesh_name)
370 * , read_data_name(parameters.read_data_name)
377 * This function initializes preCICE (
e.g. establishes communication channels
378 * and allocates memory) and passes all relevant data to preCICE. For surface
379 * coupling, relevant data is in particular the location of the data points at
380 * the associated interface(s). The `boundary_data` is an empty map, which is
381 * filled by preCICE, i.e., information of the other participant. Throughout
382 * the system assembly, the map can directly be used in order to
apply the
383 * Dirichlet boundary conditions in the linear system.
386 *
template <
int dim,
typename ParameterClass>
388 * Adapter<dim, ParameterClass>::initialize(
390 * std::map<types::global_dof_index, double> &boundary_data,
393 *
Assert(dim > 1, ExcNotImplemented());
398 * Afterwards, we
extract the number of
interface nodes and the coupling DoFs
399 * at the coupling
interface from our deal.II solver via
403 * std::set<types::boundary_id> couplingBoundary;
404 * couplingBoundary.insert(dealii_boundary_interface_id);
408 * The `
ComponentMask()` might be important in
case we deal with vector valued
409 * problems, because vector valued problems have a DoF
for each component.
418 * The coupling DoFs are used to set up the `boundary_data` map. At the
end,
419 * we associate here each DoF with a respective boundary
value.
422 *
for (
const auto i : coupling_dofs)
423 * boundary_data[i] = 0.0;
427 * Since we deal with a
scalar problem, the number of DoFs at the particular
428 *
interface corresponds to the number of interface nodes.
431 * n_interface_nodes = coupling_dofs.n_elements();
433 * std::cout <<
"\t Number of coupling nodes: " << n_interface_nodes
438 * Now, we need to tell preCICE the coordinates of the
interface nodes. Hence,
439 * we set up a std::vector to pass the node positions to preCICE. Each node is
440 * specified only once.
443 * std::vector<double> interface_nodes_positions;
444 * interface_nodes_positions.reserve(dim * n_interface_nodes);
448 * Set up the appropriate size of the data container needed
for data
450 * is read/written per
interface node.
453 * read_data_buffer.resize(n_interface_nodes);
456 * The IDs are filled by preCICE during the initialization.
459 * interface_nodes_ids.resize(n_interface_nodes);
466 * std::map<types::global_dof_index, Point<dim>> support_points;
471 * `support_points` contains now the coordinates of all DoFs. In the next
472 * step, the relevant coordinates are extracted
using the
IndexSet with the
473 * extracted coupling_dofs.
476 *
for (
const auto element : coupling_dofs)
477 *
for (
int i = 0; i < dim; ++i)
478 * interface_nodes_positions.push_back(support_points[element][i]);
482 * Now we have all information to define the coupling mesh and pass the
483 * information to preCICE.
486 * precice.setMeshVertices(mesh_name,
487 * interface_nodes_positions,
488 * interface_nodes_ids);
492 * Then, we initialize preCICE internally calling the API function
496 * precice.initialize();
499 *
template <
int dim,
typename ParameterClass>
501 * Adapter<dim, ParameterClass>::read_data(
502 *
double relative_read_time,
503 * std::map<types::global_dof_index, double> &boundary_data)
507 * here, we obtain data, i.e. the boundary condition, from another
508 * participant. We have already vertex IDs and just need to convert our
509 * obtained data to the deal.II compatible
'boundary map', which is done in
510 * the format_precice_to_dealii function.
513 * precice.readData(mesh_name,
515 * interface_nodes_ids,
516 * relative_read_time,
521 * After receiving the coupling data in `read_data_buffer`, we convert it to
522 * the std::map `boundary_data` which is later needed in order to
apply
523 * Dirichlet boundary conditions
526 * format_precice_to_dealii(boundary_data);
532 * The function `
advance()` is called in the main time
loop after the
533 * computation in each time step. Here, preCICE exchanges the coupling data
534 * internally and computes mappings as well as acceleration methods.
537 *
template <
int dim,
typename ParameterClass>
539 * Adapter<dim, ParameterClass>::advance(
const double computed_timestep_length)
543 * We specify the computed time-step length and pass it to preCICE.
546 * precice.advance(computed_timestep_length);
553 * This function takes the std::vector obtained by preCICE in `read_data_buffer` and
554 * inserts the values to the right position in the boundary map used throughout
555 * our deal.II solver
for Dirichlet boundary conditions. The function is only
556 * used internally in the Adapter
class and not called in the solver itself. The
557 * order in which preCICE sorts the data in the `read_data_buffer` vector is exactly
558 * the same as the order of the initially passed vertices coordinates.
561 *
template <
int dim,
typename ParameterClass>
563 * Adapter<dim, ParameterClass>::format_precice_to_dealii(
564 * std::map<types::global_dof_index, double> &boundary_data)
const
568 * We already stored the coupling DoF indices in the `boundary_data` map, so
569 * that we can simply iterate over all keys in the map.
572 *
auto dof_component = boundary_data.begin();
573 *
for (
int i = 0; i < n_interface_nodes; ++i)
576 * boundary_data[dof_component->first] = read_data_buffer[i];
584 * The solver
class is essentially the same as in @ref step_4
"step-4". We only extend the
585 * stationary problem to a time-dependent problem and introduced the coupling.
586 * Comments are added at any
point, where the workflow differs from @ref step_4
"step-4".
590 *
class CoupledLaplaceProblem
593 * CoupledLaplaceProblem();
608 * output_results()
const;
624 * We allocate all structures required
for the preCICE coupling: The map
625 * is used to
apply Dirichlet boundary conditions and filled in the Adapter
626 *
class with data from the other participant. The CouplingParameters hold the
627 * preCICE configuration as described above. The
interface boundary ID is the
628 * ID associated to our coupling
interface and needs to be specified, when we
629 * set up the Adapter
class object, because we pass it directly to the
630 * Constructor of
this class.
633 * std::map<types::global_dof_index, double> boundary_data;
634 * CouplingParamters parameters;
636 * Adapter<dim, CouplingParamters> adapter;
640 * The time-step size delta_t is the actual time-step size used
for all
641 * computations. The preCICE time-step size is obtained by preCICE in order to
642 * ensure a synchronization at all coupling time steps. The solver time
643 * step-size is the desired time-step size of our individual solver. In more
644 * sophisticated computations, it might be determined adaptively. The
645 * `time_step` counter is just used
for the time-step number.
649 *
double precice_delta_t;
650 *
const double solver_delta_t = 0.1;
651 *
unsigned int time_step = 0;
657 *
class RightHandSide :
public Function<dim>
661 *
value(
const Point<dim> &p,
const unsigned int component = 0)
const override;
667 *
class BoundaryValues :
public Function<dim>
671 *
value(
const Point<dim> &p,
const unsigned int component = 0)
const override;
676 * RightHandSide<dim>::value(
const Point<dim> &p,
677 *
const unsigned int )
const
679 *
double return_value = 0.0;
680 *
for (
unsigned int i = 0; i < dim; ++i)
681 * return_value += 4.0 *
std::pow(p(i), 4.0);
683 *
return return_value;
689 * BoundaryValues<dim>::value(
const Point<dim> &p,
690 *
const unsigned int )
const
698 * CoupledLaplaceProblem<dim>::CoupledLaplaceProblem()
700 * , dof_handler(triangulation)
701 * , interface_boundary_id(1)
702 * , adapter(parameters, interface_boundary_id)
708 * CoupledLaplaceProblem<dim>::make_grid()
711 * triangulation.refine_global(4);
713 *
for (
const auto &cell : triangulation.active_cell_iterators())
714 *
for (
const auto &face : cell->face_iterators())
718 * We choose the boundary in
positive x direction
for the
719 *
interface coupling.
722 *
if (face->at_boundary() && (face->center()[0] == 1))
723 * face->set_boundary_id(interface_boundary_id);
726 * std::cout <<
" Number of active cells: " << triangulation.n_active_cells()
728 * <<
" Total number of cells: " << triangulation.n_cells()
735 * CoupledLaplaceProblem<dim>::setup_system()
737 * dof_handler.distribute_dofs(fe);
739 * std::cout <<
" Number of degrees of freedom: " << dof_handler.n_dofs()
744 * sparsity_pattern.copy_from(dsp);
746 * system_matrix.reinit(sparsity_pattern);
748 * solution.reinit(dof_handler.n_dofs());
749 * old_solution.reinit(dof_handler.n_dofs());
750 * system_rhs.reinit(dof_handler.n_dofs());
757 * CoupledLaplaceProblem<dim>::assemble_system()
761 * Reset global structures
768 * Update old solution values
771 * old_solution = solution;
775 * RightHandSide<dim> right_hand_side;
778 * quadrature_formula,
782 *
const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
786 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
789 * The solution values from previous time steps are stored
for each quadrature
793 * std::vector<double> local_values_old_solution(fe_values.n_quadrature_points);
795 *
for (
const auto &cell : dof_handler.active_cell_iterators())
797 * fe_values.reinit(cell);
802 * Get the local values from the `fe_values
' object
805 * fe_values.get_function_values(old_solution, local_values_old_solution);
809 * The system matrix contains additionally a mass matrix due to the time
810 * discretization. The RHS has contributions from the old solution values.
813 * for (const unsigned int q_index : fe_values.quadrature_point_indices())
814 * for (const unsigned int i : fe_values.dof_indices())
816 * for (const unsigned int j : fe_values.dof_indices())
817 * cell_matrix(i, j) +=
818 * ((fe_values.shape_value(i, q_index) * // phi_i(x_q)
819 * fe_values.shape_value(j, q_index)) + // phi_j(x_q)
820 * (delta_t * // delta t
821 * fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
822 * fe_values.shape_grad(j, q_index))) * // grad phi_j(x_q)
823 * fe_values.JxW(q_index); // dx
825 * const auto x_q = fe_values.quadrature_point(q_index);
826 * const auto &local_value = local_values_old_solution[q_index];
827 * cell_rhs(i) += ((delta_t * // delta t
828 * fe_values.shape_value(i, q_index) * // phi_i(x_q)
829 * right_hand_side.value(x_q)) + // f(x_q)
830 * fe_values.shape_value(i, q_index) *
831 * local_value) * // phi_i(x_q)*val
832 * fe_values.JxW(q_index); // dx
837 * Copy local to global
840 * cell->get_dof_indices(local_dof_indices);
841 * for (const unsigned int i : fe_values.dof_indices())
843 * for (const unsigned int j : fe_values.dof_indices())
844 * system_matrix.add(local_dof_indices[i],
845 * local_dof_indices[j],
846 * cell_matrix(i, j));
848 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
854 * At first, we apply the Dirichlet boundary condition from @ref step_4 "step-4", as
858 * std::map<types::global_dof_index, double> boundary_values;
859 * VectorTools::interpolate_boundary_values(dof_handler,
861 * BoundaryValues<dim>(),
863 * MatrixTools::apply_boundary_values(boundary_values,
871 * Afterwards, we apply the coupling boundary condition. The `boundary_data`
872 * has already been filled by preCICE.
875 * MatrixTools::apply_boundary_values(boundary_data,
886 * CoupledLaplaceProblem<dim>::solve()
888 * SolverControl solver_control(1000, 1e-12);
889 * SolverCG<Vector<double>> solver(solver_control);
890 * solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
892 * std::cout << " " << solver_control.last_step()
893 * << " CG iterations needed to obtain convergence." << std::endl;
900 * CoupledLaplaceProblem<dim>::output_results() const
902 * DataOut<dim> data_out;
904 * data_out.attach_dof_handler(dof_handler);
905 * data_out.add_data_vector(solution, "solution");
907 * data_out.build_patches(mapping);
909 * std::ofstream output("solution-" + std::to_string(time_step) + ".vtk");
910 * data_out.write_vtk(output);
917 * CoupledLaplaceProblem<dim>::run()
919 * std::cout << "Solving problem in " << dim << " space dimensions."
927 * After we set up the system, we initialize preCICE and the adapter using the
928 * functionalities of the Adapter.
931 * adapter.initialize(dof_handler, boundary_data, mapping);
935 * preCICE steers the coupled simulation: `isCouplingOngoing` is
936 * used to synchronize the end of the simulation with the coupling partner
939 * while (adapter.precice.isCouplingOngoing())
943 * The time step number is solely used to generate unique output files
949 * preCICE returns the maximum admissible time-step size,
950 * which needs to be compared to our desired solver time-step size.
953 * precice_delta_t = adapter.precice.getMaxTimeStepSize();
954 * delta_t = std::min(precice_delta_t, solver_delta_t);
957 * Next we read data. Since we use a fully backward Euler method, we want
958 * the data to be associated to the end of the current time-step (delta_t)
959 * Time-interpolation methods in preCICE allow to get readData at any
960 * point in time, if the coupling scheme allows it
963 * adapter.read_data(delta_t, boundary_data);
967 * In the time loop, we assemble the coupled system and solve it as
976 * After we solved the system, we advance the coupling to the next time
977 * level. In a bi-directional coupled simulation, we would pass our
978 * calculated data to preCICE
981 * adapter.advance(delta_t);
985 * Write an output file if the time step is completed. In case of an
986 * implicit coupling, where individual time steps are computed more than
987 * once, the function `isTimeWindowCompleted` prevents unnecessary result
988 * writing. For this simple tutorial configuration (explicit coupling),
989 * the function returns always `true`.
992 * if (adapter.precice.isTimeWindowComplete())
1002 * CoupledLaplaceProblem<2> laplace_problem;
1003 * laplace_problem.run();
1010<a name="ann-fancy_boundary_condition.cc"></a>
1011<h1>Annotated version of fancy_boundary_condition.cc</h1>
1017 * /* -----------------------------------------------------------------------------
1019 * * SPDX-License-Identifier: LGPL-2.1-or-later
1020 * * Copyright (C) 2020 by David Schneider
1021 * * Copyright (C) 2020 by Benjamin Uekermann
1023 * * This file is part of the deal.II code gallery.
1025 * * -----------------------------------------------------------------------------
1030 * This program does not use any deal.II functionality and depends only on
1031 * preCICE and the standard libraries.
1034 * #include <precice/precice.hpp>
1036 * #include <iostream>
1037 * #include <sstream>
1042 * The program computes a time-varying parabolic boundary condition, which is
1043 * passed to preCICE and serves as Dirichlet boundary condition for the other
1044 * coupling participant.
1048 * Function to generate boundary values in each time step
1052 * define_boundary_values(std::vector<double> &boundary_data,
1053 * const double time,
1054 * const double end_time)
1058 * Scale the current time value
1061 * const double relative_time = time / end_time;
1064 * Define the amplitude. Values run from -0.5 to 0.5
1067 * const double amplitude = (relative_time - 0.5);
1070 * Specify the actual data we want to pass to the other participant. Here, we
1071 * choose a parabola with boundary values 2 in order to enforce continuity
1072 * to adjacent boundaries.
1075 * const double n_elements = boundary_data.size();
1076 * const double right_zero = boundary_data.size() - 1;
1077 * const double left_zero = 0;
1078 * const double offset = 2;
1079 * for (uint i = 0; i < n_elements; ++i)
1080 * boundary_data[i] =
1081 * -amplitude * ((i - left_zero) * (i - right_zero)) + offset;
1088 * std::cout << "Boundary participant: starting... \n";
1095 * const std::string configFileName("precice-config.xml");
1096 * const std::string solverName("boundary-participant");
1097 * const std::string meshName("boundary-mesh");
1098 * const std::string dataWriteName("boundary-data");
1102 * Adjust to MPI rank and size for parallel computation
1105 * const int commRank = 0;
1106 * const int commSize = 1;
1108 * precice::Participant precice(solverName, configFileName, commRank, commSize);
1110 * const int dimensions = precice.getMeshDimensions(meshName);
1111 * const int numberOfVertices = 6;
1115 * Set up data structures
1118 * std::vector<double> writeData(numberOfVertices);
1119 * std::vector<int> vertexIDs(numberOfVertices);
1120 * std::vector<double> vertices(numberOfVertices * dimensions);
1124 * Define a boundary mesh
1127 * std::cout << "Boundary participant: defining boundary mesh \n";
1128 * const double length = 2;
1129 * const double xCoord = 1;
1130 * const double deltaY = length / (numberOfVertices - 1);
1131 * for (int i = 0; i < numberOfVertices; ++i)
1132 * for (int j = 0; j < dimensions; ++j)
1134 * const unsigned int index = dimensions * i + j;
1137 * The x-coordinate is always 1, i.e., the boundary is parallel to the
1138 * y-axis. The y-coordinate is descending from 1 to -1.
1142 * vertices[index] = xCoord;
1144 * vertices[index] = 1 - deltaY * i;
1149 * Pass the vertices to preCICE
1152 * precice.setMeshVertices(meshName, vertices, vertexIDs);
1156 * Variables for the time
1159 * const double end_time = 1;
1164 * Not used in the configuration by default
1167 * if (precice.requiresInitialData())
1169 * std::cout << "Boundary participant: writing initial data \n";
1170 * define_boundary_values(writeData, time, end_time);
1171 * precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1176 * initialize the Participant
1179 * precice.initialize();
1186 * while (precice.isCouplingOngoing())
1188 * double dt = precice.getMaxTimeStepSize();
1193 * Generate new boundary data
1196 * define_boundary_values(writeData, time, end_time);
1198 * std::cout << "Boundary participant: writing coupling data \n";
1199 * precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1201 * std::cout << "Boundary participant: advancing in time\n";
1202 * precice.advance(dt);
1205 * std::cout << "Boundary participant: closing...\n";
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
#define AssertIndexRange(index, range)
void loop(IteratorType begin, std_cxx20::type_identity_t< IteratorType > end, DOFINFO &dinfo, INFOBOX &info, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &cell_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &)> &boundary_worker, const std::function< void(std_cxx20::type_identity_t< DOFINFO > &, std_cxx20::type_identity_t< DOFINFO > &, typename INFOBOX::CellInfo &, typename INFOBOX::CellInfo &)> &face_worker, AssemblerType &assembler, const LoopControl &lctrl=LoopControl())
const bool IsBlockVector< VectorType >::value
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternBase &sparsity_pattern, const AffineConstraints< number > &constraints={}, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
@ update_values
Shape function values.
@ update_JxW_values
Transformed quadrature weights.
@ update_gradients
Shape function gradients.
@ update_quadrature_points
Transformed quadrature points.
MappingQ< dim, spacedim > MappingQGeneric
MappingQ< dim, spacedim > StaticMappingQ1< dim, spacedim >::mapping
void hyper_cube(Triangulation< dim, spacedim > &tria, const double left=0., const double right=1., const bool colorize=false)
constexpr types::blas_int one
void cell_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const FEValuesBase< dim > &fetest, const ArrayView< const std::vector< double > > &velocity, const double factor=1.)
Point< spacedim > point(const gp_Pnt &p, const double tolerance=1e-10)
SymmetricTensor< 2, dim, Number > e(const Tensor< 2, dim, Number > &F)
void apply(const Kokkos::TeamPolicy< MemorySpace::Default::kokkos_space::execution_space >::member_type &team_member, const Kokkos::View< Number *, MemorySpace::Default::kokkos_space > shape_data, const ViewTypeIn in, ViewTypeOut out)
constexpr ReturnType< rank, T >::value_type & extract(T &t, const ArrayType &indices)
VectorType::value_type * end(VectorType &V)
std::vector< unsigned int > serial(const std::vector< unsigned int > &targets, const std::function< RequestType(const unsigned int)> &create_request, const std::function< AnswerType(const unsigned int, const RequestType &)> &answer_request, const std::function< void(const unsigned int, const AnswerType &)> &process_answer, const MPI_Comm comm)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
void transform(const InputIterator &begin_in, const InputIterator &end_in, OutputIterator out, const Function &function, const unsigned int grainsize)
::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
void advance(std::tuple< I1, I2 > &t, const unsigned int n)