deal.II version 9.7.0
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
Loading...
Searching...
No Matches
index_set.cc
Go to the documentation of this file.
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2009 - 2025 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
17#include <deal.II/base/mpi.h>
18
21
22#include <boost/container/small_vector.hpp>
23
24#include <vector>
25
26#ifdef DEAL_II_WITH_TRILINOS
28# ifdef DEAL_II_WITH_MPI
29# include <Epetra_MpiComm.h>
30# endif
31# include <Epetra_Map.h>
32# include <Epetra_SerialComm.h>
33# ifdef DEAL_II_TRILINOS_WITH_TPETRA
34# include <Tpetra_Map.hpp>
35# endif
37#endif
38
40
41
42
43#ifdef DEAL_II_WITH_TRILINOS
44
45# ifdef DEAL_II_TRILINOS_WITH_TPETRA
46
// Construct an IndexSet from a (distributed) Tpetra::Map. The size of the
// index space is one past the largest global index stored in the map.
//
// NOTE(review): the extraction dropped the line declaring the constructor
// name (IndexSet::IndexSet(...)) and the ExcMessage( wrapper inside the
// Assert below -- confirm against the repository before compiling.
template <typename NodeType>
  const Teuchos::RCP<
    const Tpetra::Map<int, types::signed_global_dof_index, NodeType>> &map)
  : is_compressed(true)
  , index_space_size(1 + map->getMaxAllGlobalIndex())
  , largest_range(numbers::invalid_unsigned_int)
{
  // The map must cover the index space starting at zero; otherwise some
  // indices would not be owned by any processor.
  Assert(map->getMinAllGlobalIndex() == 0,
         "The Tpetra::Map does not contain the global index 0, "
         "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map->isContiguous())
    add_range(size_type(map->getMinGlobalIndex()),
              size_type(map->getMaxGlobalIndex() + 1));
  else
    {
      // Trilinos renamed getNodeNumElements() to getLocalNumElements()
      // in version 13.4.
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 4, 0)
      const size_type n_indices = map->getLocalNumElements();
#  else
      const size_type n_indices = map->getNodeNumElements();
#  endif
      const types::signed_global_dof_index *indices =
        map->getMyGlobalIndices().data();
      add_indices(indices, indices + n_indices);
    }
  compress();
}
77
78# endif // DEAL_II_TRILINOS_WITH_TPETRA
79
80
81// the 64-bit path uses a few different names, so put that into a separate
82// implementation
83
84# ifdef DEAL_II_WITH_64BIT_INDICES
85
// Construct an IndexSet from an Epetra_BlockMap using the 64-bit global
// index interface (MaxAllGID64() and friends).
//
// NOTE(review): the ExcMessage( wrapper line inside the Assert below was
// dropped by extraction -- confirm against the repository.
IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  // The map must cover the index space starting at zero.
  Assert(map.MinAllGID64() == 0,
         "The Epetra_BlockMap does not contain the global index 0, "
         "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      // Epetra hands out long long; reinterpret as our size_type, which is
      // also a 64-bit unsigned integer in this configuration.
      size_type *indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
    }
  compress();
}
108
109# else
110
111// this is the standard 32-bit implementation
112
// Construct an IndexSet from an Epetra_BlockMap using the 32-bit global
// index interface (standard build without DEAL_II_WITH_64BIT_INDICES).
//
// NOTE(review): the ExcMessage( wrapper line inside the Assert below was
// dropped by extraction -- confirm against the repository.
IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  // The map must cover the index space starting at zero.
  Assert(map.MinAllGID() == 0,
         "The Epetra_BlockMap does not contain the global index 0, "
         "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      // Epetra hands out int; reinterpret as unsigned int of equal width.
      unsigned int *indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
      add_indices(indices, indices + n_indices);
    }
  compress();
}
135
136# endif
137
138#endif // ifdef DEAL_II_WITH_TRILINOS
139
140
141
// Merge overlapping/adjacent ranges, recompute each range's position within
// the set (nth_index_in_set) and locate the largest range, then mark the
// set as compressed.
//
// NOTE(review): the signature line (presumably "IndexSet::do_compress()
// const") and the ExcMessage( wrapper of the final Assert were dropped by
// extraction -- confirm against the repository.
void
{
  {
    // we will, in the following, modify mutable variables. this can only
    // work in multithreaded applications if we lock the data structures
    // via a mutex, so that users can call 'const' functions from threads
    // in parallel (and these 'const' functions can then call compress()
    // which itself calls the current function)
    std::lock_guard<std::mutex> lock(compress_mutex);

    // see if any of the contiguous ranges can be merged. do not use
    // std::vector::erase in-place as it is quadratic in the number of
    // ranges. since the ranges are sorted by their first index, determining
    // overlap isn't all that hard
    std::vector<Range>::iterator store = ranges.begin();
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
      {
        std::vector<Range>::iterator next = i;
        ++next;

        size_type first_index = i->begin;
        size_type last_index = i->end;

        // see if we can merge any of the following ranges
        while (next != ranges.end() && (next->begin <= last_index))
          {
            last_index = std::max(last_index, next->end);
            ++next;
          }
        i = next;

        // store the new range in the slot we last occupied
        *store = Range(first_index, last_index);
        ++store;
      }
    // use a compact array with exactly the right amount of storage
    if (store != ranges.end())
      {
        std::vector<Range> new_ranges(ranges.begin(), store);
        ranges.swap(new_ranges);
      }

    // now compute indices within set and the range with most elements
    size_type next_index = 0, largest_range_size = 0;
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();
         ++i)
      {
        Assert(i->begin < i->end, ExcInternalError());

        i->nth_index_in_set = next_index;
        next_index += (i->end - i->begin);
        if (i->end - i->begin > largest_range_size)
          {
            largest_range_size = i->end - i->begin;
            largest_range = i - ranges.begin();
          }
      }
    is_compressed = true;

    // check that next_index is correct. needs to be after the previous
    // statement because we otherwise will get into an endless loop
    Assert(next_index == n_elements(), ExcInternalError());
  }

  if constexpr (running_in_debug_mode())
    {
      // A consistency check: We should only ever have added indices
      // that are within the range of the index set. Instead of doing
      // this in every one of the many functions that add indices,
      // do this in the current, central location
      for (const auto &range : ranges)
        Assert((range.begin < index_space_size) &&
                 (range.end <= index_space_size),
               "In the process of creating the current IndexSet "
               "object, you added indices beyond the size of the index "
               "space. Specifically, you added elements that form the "
               "range [" +
                 std::to_string(range.begin) + "," + std::to_string(range.end) +
                 "), but the size of the index space is only " +
                 std::to_string(index_space_size) + "."));
    }
}
226
227
228
229#ifndef DOXYGEN
// Set intersection: walk both (compressed, hence sorted) range lists in
// lock-step and collect the overlap of every pair of ranges.
//
// NOTE(review): the return-type line ("IndexSet") above the signature and
// the ExcInternalError()); line closing the Assert were dropped by
// extraction -- confirm against the repository.
IndexSet::operator&(const IndexSet &is) const
{
  Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));

  compress();
  is.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // if r1 and r2 do not overlap at all, then move the pointer that sits
      // to the left of the other up by one
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // the ranges must overlap somehow
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),

          // add the overlapping range to the result
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));

          // now move that iterator that ends earlier one up. note that it has
          // to be this one because a subsequent range may still have a chance
          // of overlapping with the range that ends later
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}
274#endif
275
276
277
// Return the subset of this IndexSet that falls into [begin,end), shifted
// so that the result lives in the index space [0, end-begin).
//
// NOTE(review): the return-type and signature lines (presumably
// "IndexSet IndexSet::get_view(const size_type begin, const size_type end)
// const") were dropped by extraction -- confirm against the repository.
{
  Assert(begin <= end,
         ExcMessage("End index needs to be larger or equal to begin index!"));
  Assert(end <= size(),
         ExcMessage("You are asking for a view into an IndexSet object "
                    "that would cover the sub-range [" +
                    std::to_string(begin) + ',' + std::to_string(end) +
                    "). But this is not a subset of the range "
                    "of the current object, which is [0," +
                    std::to_string(size()) + ")."));

  IndexSet result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      // Clip every range against the window [begin,end) and shift it left
      // by 'begin'; ranges entirely to the right terminate the scan since
      // ranges are sorted.
      if ((r1->end > begin) && (r1->begin < end))
        {
          result.add_range(std::max(r1->begin, begin) - begin,
                           std::min(r1->end, end) - begin);
        }
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}
310
311
312
// Return the view of this IndexSet through an arbitrary mask: the result
// contains, for each element of *this that is also in 'mask', its position
// within the mask's own enumeration.
//
// NOTE(review): the return-type line ("IndexSet") above the signature and
// the statement following the final bare 'else' (presumably
// DEAL_II_ASSERT_UNREACHABLE() or similar) were dropped by extraction --
// confirm against the repository.
IndexSet::get_view(const IndexSet &mask) const
{
  Assert(size() == mask.size(),
         ExcMessage("The mask must have the same size index space "
                    "as the index set it is applied to."));

  // If 'other' is an empty set, then the view is also empty:
  if (mask == IndexSet())
    return {};

  // For everything, it is more efficient to work on compressed sets:
  compress();
  mask.compress();

  // If 'other' has a single range, then we can just defer to the
  // previous function
  if (mask.ranges.size() == 1)
    return get_view(mask.ranges[0].begin, mask.ranges[0].end);

  // For the general case where the mask is an arbitrary set,
  // the situation is slightly more complicated. We need to walk
  // the ranges of the two index sets in parallel and search for
  // overlaps, and then appropriately shift

  // we save all new ranges to our IndexSet in an temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it = ranges.begin();
  std::vector<Range>::iterator mask_it = mask.ranges.begin();

  while ((own_it != ranges.end()) && (mask_it != mask.ranges.end()))
    {
      // If our own range lies completely ahead of the current
      // range in the mask, move forward and start the loop body
      // anew. If this was the last range, the 'while' loop above
      // will terminate, so we don't have to check for end iterators
      if (own_it->end <= mask_it->begin)
        {
          ++own_it;
          continue;
        }

      // Do the same if the current mask range lies completely ahead of
      // the current range of the this object:
      if (mask_it->end <= own_it->begin)
        {
          ++mask_it;
          continue;
        }

      // Now own_it and other_it overlap. Check that that is true by
      // enumerating the cases that can happen. This is
      // surprisingly tricky because the two intervals can intersect in
      // a number of different ways, but there really are only the four
      // following possibilities:

      // Case 1: our interval overlaps the left end of the other interval
      //
      // So we need to add the elements from the first element of the mask's
      // interval to the end of our own interval. But we need to shift the
      // indices so that they correspond to the how many'th element within the
      // mask this is; fortunately (because we compressed the mask), this
      // is recorded in the mask's ranges.
      if ((own_it->begin <= mask_it->begin) && (own_it->end <= mask_it->end))
        {
          new_ranges.emplace_back(mask_it->begin - mask_it->nth_index_in_set,
                                  own_it->end - mask_it->nth_index_in_set);
        }
      else
        // Case 2:our interval overlaps the tail end of the other interval
        if ((mask_it->begin <= own_it->begin) && (mask_it->end <= own_it->end))
          {
            const size_type offset_within_mask_interval =
              own_it->begin - mask_it->begin;
            new_ranges.emplace_back(mask_it->nth_index_in_set +
                                      offset_within_mask_interval,
                                    mask_it->nth_index_in_set +
                                      (mask_it->end - mask_it->begin));
          }
        else
          // Case 3: Our own interval completely encloses the other interval
          if ((own_it->begin <= mask_it->begin) &&
              (own_it->end >= mask_it->end))
            {
              new_ranges.emplace_back(mask_it->begin -
                                        mask_it->nth_index_in_set,
                                      mask_it->end - mask_it->nth_index_in_set);
            }
          else
            // Case 3: The other interval completely encloses our own interval
            if ((mask_it->begin <= own_it->begin) &&
                (mask_it->end >= own_it->end))
              {
                const size_type offset_within_mask_interval =
                  own_it->begin - mask_it->begin;
                new_ranges.emplace_back(mask_it->nth_index_in_set +
                                          offset_within_mask_interval,
                                        mask_it->nth_index_in_set +
                                          offset_within_mask_interval +
                                          (own_it->end - own_it->begin));
              }
            else

      // We considered the overlap of these two intervals. It may of course
      // be that one of them overlaps with another one, but that can only
      // be the case for the interval that extends further to the right. So
      // we can safely move on from the interval that terminates earlier:
      if (own_it->end < mask_it->end)
        ++own_it;
      else if (mask_it->end < own_it->end)
        ++mask_it;
      else
        {
          // The intervals ended at the same point. We can move on from both.
          // (The algorithm would also work if we only moved on from one,
          // but we can micro-optimize here without too much effort.)
          ++own_it;
          ++mask_it;
        }
    }

  // Now turn the ranges of overlap we have accumulated into an IndexSet in
  // its own right:
  IndexSet result(mask.n_elements());
  for (const auto &range : new_ranges)
    result.add_range(range.begin, range.end);
  result.compress();

  return result;
}
446
447
448
// Partition this IndexSet into consecutive views, one per block, where
// block b covers n_indices_per_block[b] indices of the index space.
//
// NOTE(review): the signature line ("IndexSet::split_by_block(") and the
// declaration of the debug accumulator (presumably
// "types::global_dof_index sum = 0;") were dropped by extraction --
// confirm against the repository.
std::vector<IndexSet>
  const std::vector<types::global_dof_index> &n_indices_per_block) const
{
  std::vector<IndexSet> partitioned;
  const unsigned int n_blocks = n_indices_per_block.size();

  partitioned.reserve(n_blocks);
  types::global_dof_index start = 0;
  // Each block gets the view over its own contiguous window of the
  // index space, shifted so each view starts at zero.
  for (const auto n_block_indices : n_indices_per_block)
    {
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
    }

  if constexpr (running_in_debug_mode())
    {
      // Check that the blocks together cover the whole index space.
      for (const auto &partition : partitioned)
        {
          sum += partition.size();
        }
      AssertDimension(sum, this->size());
    }

  return partitioned;
}
476
477
478
// Remove from this IndexSet every index that is also contained in 'other'
// (set difference, in place).
//
// NOTE(review): the signature line (presumably
// "IndexSet::subtract_set(const IndexSet &other)") was dropped by
// extraction -- confirm against the repository.
void
{
  compress();
  other.compress();
  is_compressed = false;


  // we save all new ranges to our IndexSet in an temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // advance own iterator until we get an overlap
      if (own_it->end <= other_it->begin)
        {
          new_ranges.push_back(*own_it);
          ++own_it;
          continue;
        }
      // we are done with other_it, so advance
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // Now own_it and other_it overlap. First save the part of own_it that
      // is before other_it (if not empty).
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // fix warning of unused variable
          new_ranges.push_back(r);
        }
      // change own_it to the sub range behind other_it. Do not delete own_it
      // in any case. As removal would invalidate iterators, we just shrink
      // the range to an empty one.
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          ++own_it;
        }

      // continue without advancing iterators, the right one will be advanced
      // next.
    }

  // make sure to take over the remaining ranges
  for (; own_it != ranges.end(); ++own_it)
    new_ranges.push_back(*own_it);

  ranges.clear();

  // done, now add the temporary ranges
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}
545
546
547
// Tensor product of two index sets: for every element 'el' of *this, the
// result contains the block [el*other.size(), el*other.size()+other.size())
// restricted to the elements of 'other', in an index space of size
// size()*other.size().
//
// NOTE(review): the return-type and signature lines (presumably
// "IndexSet IndexSet::tensor_product(const IndexSet &other) const") were
// dropped by extraction -- confirm against the repository.
{
  IndexSet set(this->size() * other.size());
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
  set.compress();
  return set;
}
557
558
559
// Remove and return the last (largest) index in the set. Shrinks the last
// range by one and drops it entirely if it becomes empty.
//
// NOTE(review): the return-type/signature lines (presumably
// "IndexSet::size_type IndexSet::pop_back()") and the ExcMessage( wrapper
// of the Assert were dropped by extraction -- confirm against the
// repository.
{
  Assert(is_empty() == false,
         "pop_back() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.back().end - 1;
  --ranges.back().end;

  if (ranges.back().begin == ranges.back().end)
    ranges.pop_back();

  return index;
}
575
576
577
// Remove and return the first (smallest) index in the set. Shrinks the
// first range from the left and erases it if it becomes empty.
//
// NOTE(review): the return-type/signature lines (presumably
// "IndexSet::size_type IndexSet::pop_front()") and the ExcMessage( wrapper
// of the Assert were dropped by extraction -- confirm against the
// repository.
{
  Assert(is_empty() == false,
         "pop_front() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.front().begin;
  ++ranges.front().begin;

  if (ranges.front().begin == ranges.front().end)
    ranges.erase(ranges.begin());

  // We have to set this in any case, because nth_index_in_set is no longer
  // up to date for all but the first range
  is_compressed = false;

  return index;
}
597
598
599
// Insert a new range at its sorted position, unless an existing range at
// that position already fully covers it. Merging with neighbors is left to
// compress().
//
// NOTE(review): the signature line (presumably
// "IndexSet::add_range_lower_bound(const Range &new_range)") was dropped
// by extraction -- confirm against the repository.
void
{
  // if the inserted range is already within the range we find by lower_bound,
  // there is no need to do anything; we do not try to be clever here and
  // leave all other work to compress().
  const auto insert_position =
    Utilities::lower_bound(ranges.begin(), ranges.end(), new_range);
  if (insert_position == ranges.end() ||
      insert_position->begin > new_range.begin ||
      insert_position->end < new_range.end)
    ranges.insert(insert_position, new_range);
}
613
614
615
// Add a batch of [begin,end) pairs to this set, sorting them first if
// necessary and choosing between bulk construction via a temporary
// IndexSet (for many ranges) or repeated add_range() (for few).
//
// NOTE(review): the signature line (presumably
// "IndexSet::add_ranges_internal(") was dropped by extraction -- confirm
// against the repository.
void
  boost::container::small_vector<std::pair<size_type, size_type>, 200>
    &tmp_ranges,
  const bool ranges_are_sorted)
{
  if (!ranges_are_sorted)
    std::sort(tmp_ranges.begin(), tmp_ranges.end());

  // if we have many ranges, we first construct a temporary index set (where
  // we add ranges in a consecutive way, so fast), otherwise, we work with
  // add_range(). the number 9 is chosen heuristically given the fact that
  // there are typically up to 8 independent ranges when adding the degrees of
  // freedom on a 3d cell or 9 when adding degrees of freedom of faces. if
  // doing cell-by-cell additions, we want to avoid repeated calls to
  // IndexSet::compress() which gets called upon merging two index sets, so we
  // want to be in the other branch then.
  if (tmp_ranges.size() > 9)
    {
      IndexSet tmp_set(size());
      tmp_set.ranges.reserve(tmp_ranges.size());
      for (const auto &i : tmp_ranges)
        tmp_set.add_range(i.first, i.second);

      // Case if we have zero or just one range: Add into the other set with
      // its indices, as that is cheaper
      if (this->ranges.size() <= 1)
        {
          if (this->ranges.size() == 1)
            tmp_set.add_range(ranges[0].begin, ranges[0].end);
          std::swap(*this, tmp_set);
        }
      else
        this->add_indices(tmp_set);
    }
  else
    for (const auto &i : tmp_ranges)
      add_range(i.first, i.second);
}
655
656
657
// Merge all indices of 'other' (shifted right by 'offset') into this set
// by walking both sorted range lists in lock-step and concatenating or
// combining ranges; final normalization is left to compress().
//
// NOTE(review): the first 'if' condition below appears to be missing one
// closing parenthesis, likely lost in extraction -- confirm against the
// repository.
void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  // Adding a set to itself with no shift is a no-op.
  if ((this == &other) && (offset == 0))
    return;

  // The shifted set must still fit into our index space.
  if (other.ranges.size() != 0)
    {
      AssertIndexRange(other.ranges.back().end - 1, index_space_size);
    }

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // just get the start and end of the ranges right in this method, everything
  // else will be done in compress()
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // the two ranges do not overlap or we are at the end of one of the
      // ranges
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // ok, we do overlap, so just take the combination of the current
          // range (do not bother to merge with subsequent ranges)
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}
709
710
711
// Return whether every index of *this is also contained in 'other',
// implemented as: (this \ other) is empty.
//
// NOTE(review): the signature line (presumably
// "IndexSet::is_subset_of(const IndexSet &other) const") was dropped by
// extraction -- confirm against the repository.
bool
{
  Assert(size() == other.size(),
         ExcMessage("One index set can only be a subset of another if they "
                    "describe index spaces of the same size. The ones in "
                    "question here have sizes " +
                    std::to_string(size()) + " and " +
                    std::to_string(other.size()) + "."));

  // See whether there are indices in the current set that are not in 'other'.
  // If so, then this is clearly not a subset of 'other'.
  IndexSet A_minus_B = *this;
  A_minus_B.subtract_set(other);
  if (A_minus_B.n_elements() > 0)
    return false;
  else
    // Else, every index in 'this' is also in 'other', since we ended up
    // with an empty set upon subtraction. This means that we have a subset:
    return true;
}
733
734
735
736void
737IndexSet::write(std::ostream &out) const
738{
739 compress();
740 out << size() << " ";
741 out << ranges.size() << std::endl;
742 std::vector<Range>::const_iterator r = ranges.begin();
743 for (; r != ranges.end(); ++r)
744 {
745 out << r->begin << " " << r->end << std::endl;
746 }
747}
748
749
750
751void
752IndexSet::read(std::istream &in)
753{
754 AssertThrow(in.fail() == false, ExcIO());
755
756 size_type s;
757 unsigned int n_ranges;
758
759 in >> s >> n_ranges;
760 ranges.clear();
761 set_size(s);
762 for (unsigned int i = 0; i < n_ranges; ++i)
763 {
764 AssertThrow(in.fail() == false, ExcIO());
765
766 size_type b, e;
767 in >> b >> e;
768 add_range(b, e);
769 }
770}
771
772
773void
774IndexSet::block_write(std::ostream &out) const
775{
776 AssertThrow(out.fail() == false, ExcIO());
777 out.write(reinterpret_cast<const char *>(&index_space_size),
778 sizeof(index_space_size));
779 std::size_t n_ranges = ranges.size();
780 out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
781 if (ranges.empty() == false)
782 out.write(reinterpret_cast<const char *>(&*ranges.begin()),
783 ranges.size() * sizeof(Range));
784 AssertThrow(out.fail() == false, ExcIO());
785}
786
// Read an IndexSet from a stream in the raw binary format produced by
// block_write(). Replaces the current contents of this object.
//
// NOTE(review): the declaration of the local 'size' variable (presumably
// "size_type size;") was dropped by extraction -- confirm against the
// repository.
void
IndexSet::block_read(std::istream &in)
{
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
  // we have to clear ranges first
  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0u)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed so that largest_range can be recomputed
}
804
805
806
// Return whether 'index' is contained in this (compressed) set, using a
// binary search that is narrowed to one side of the largest range.
//
// NOTE(review): the signature line (presumably
// "IndexSet::is_element_binary_search(const size_type index) const") was
// dropped by extraction -- confirm against the repository.
bool
{
  // get the element after which we would have to insert a range that
  // consists of all elements from this element to the end of the index
  // range plus one. after this call we know that if p!=end() then
  // p->begin<=index unless there is no such range at all
  //
  // if the searched for element is an element of this range, then we're
  // done. otherwise, the element can't be in one of the following ranges
  // because otherwise p would be a different iterator
  //
  // since we already know the position relative to the largest range (we
  // called compress!), we can perform the binary search on ranges with
  // lower/higher number compared to the largest range
  std::vector<Range>::const_iterator p = std::upper_bound(
    ranges.begin() +
      (index < ranges[largest_range].begin ? 0 : largest_range + 1),
    index < ranges[largest_range].begin ? ranges.begin() + largest_range :
                                          ranges.end(),
    Range(index, size() + 1));

  if (p == ranges.begin())
    return ((index >= p->begin) && (index < p->end));

  Assert((p == ranges.end()) || (p->begin > index), ExcInternalError());

  // now move to that previous range
  --p;
  Assert(p->begin <= index, ExcInternalError());

  return (p->end > index);
}
840
841
842
// Map the n-th element of the set (local index) back to its global index
// by binary-searching the cached nth_index_in_set offsets of the ranges.
//
// NOTE(review): the return-type/signature lines (presumably
// "IndexSet::size_type IndexSet::nth_index_in_set_binary_search(const
// size_type n) const") were dropped by extraction -- confirm against the
// repository.
{
  // find out which chunk the local index n belongs to by using a binary
  // search. the comparator is based on the end of the ranges.
  Range r(n, n + 1);
  r.nth_index_in_set = n;

  const std::vector<Range>::const_iterator p = Utilities::lower_bound(
    ranges.begin(), ranges.end(), r, Range::nth_index_compare);

  Assert(p != ranges.end(), ExcInternalError());
  return p->begin + (n - p->nth_index_in_set);
}
857
858
859
// Map a global index to its position within the set (local index) via a
// binary search over the ranges.
//
// NOTE(review): several lines were dropped by extraction -- the
// return-type/signature lines, the right-hand side of the lower_bound
// call (presumably "Utilities::lower_bound(ranges.begin(), ranges.end(),
// r, Range::end_compare);"), and the early-return statement for indices
// not in the set (presumably "return numbers::invalid_dof_index;").
// Confirm against the repository.
{
  // we could try to use the main range for splitting up the search range, but
  // since we only come here when the largest range did not contain the index,
  // there is little gain from doing a first step manually.
  Range r(n, n);
  std::vector<Range>::const_iterator p =

  // if n is not in this set
  if (p == ranges.end() || p->end == n || p->begin > n)

  Assert(p != ranges.end(), ExcInternalError());
  Assert(p->begin <= n, ExcInternalError());
  Assert(n < p->end, ExcInternalError());
  return (n - p->begin) + p->nth_index_in_set;
}
879
880
881
// Return an iterator to 'global_index' if it is an element of the set,
// otherwise to the first element after it (or end() if there is none).
//
// NOTE(review): the return-type line above the signature (presumably
// "IndexSet::ElementIterator") was dropped by extraction -- confirm
// against the repository.
IndexSet::at(const size_type global_index) const
{
  compress();
  AssertIndexRange(global_index, size());

  // An empty set has nothing to point at.
  if (ranges.empty())
    return end();

  std::vector<Range>::const_iterator main_range =
    ranges.begin() + largest_range;

  Range r(global_index, global_index + 1);
  // This optimization makes the bounds for lower_bound smaller by checking
  // the largest range first.
  std::vector<Range>::const_iterator range_begin, range_end;
  if (global_index < main_range->begin)
    {
      range_begin = ranges.begin();
      range_end = main_range;
    }
  else
    {
      range_begin = main_range;
      range_end = ranges.end();
    }

  // This will give us the first range p=[a,b[ with b>=global_index using
  // a binary search
  const std::vector<Range>::const_iterator p =
    Utilities::lower_bound(range_begin, range_end, r, Range::end_compare);

  // We couldn't find a range, which means we have no range that contains
  // global_index and also no range behind it, meaning we need to return end().
  if (p == ranges.end())
    return end();

  // Finally, we can have two cases: Either global_index is not in [a,b[,
  // which means we need to return an iterator to a because global_index, ...,
  // a-1 is not in the IndexSet (if branch). Alternatively, global_index is in
  // [a,b[ and we will return an iterator pointing directly at global_index
  // (else branch).
  if (global_index < p->begin)
    return {this, static_cast<size_type>(p - ranges.begin()), p->begin};
  else
    return {this, static_cast<size_type>(p - ranges.begin()), global_index};
}
929
930
931
// Return all elements of the set, in ascending order, as a flat vector.
//
// NOTE(review): the signature line (presumably
// "IndexSet::get_index_vector() const") was dropped by extraction --
// confirm against the repository.
std::vector<IndexSet::size_type>
{
  compress();

  std::vector<size_type> indices;
  indices.reserve(n_elements());

  // Expand every compressed range into its individual indices.
  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());

  return indices;
}
948
949
950
951void
952IndexSet::fill_index_vector(std::vector<size_type> &indices) const
953{
954 indices = get_index_vector();
955}
956
957
958
959#ifdef DEAL_II_WITH_TRILINOS
960# ifdef DEAL_II_TRILINOS_WITH_TPETRA
961
// Return a Tpetra::Map by value; simply dereferences the RCP produced by
// make_tpetra_map_rcp().
//
// NOTE(review): the signature line (presumably
// "IndexSet::make_tpetra_map(const MPI_Comm communicator,") was dropped
// by extraction -- confirm against the repository.
template <typename NodeType>
Tpetra::Map<int, types::signed_global_dof_index, NodeType>
  const bool overlapping) const
{
  return *make_tpetra_map_rcp<NodeType>(communicator, overlapping);
}
969
970
971
// Build a Tpetra::Map describing this IndexSet's partition of indices
// across the processes of 'communicator'. Uses the cheap contiguous-map
// constructor when the set is ascending and 1:1, and the general
// index-list constructor otherwise.
//
// NOTE(review): extraction dropped the signature line (presumably
// "IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator,") as well
// as the "return Utilities::Trilinos::internal::make_rcp<" lines that
// should precede both Tpetra::Map template argument lists, and the
// "Utilities::Trilinos::internal::make_rcp<Teuchos::MpiComm<int>>("
// lines in the MPI branches -- confirm against the repository.
template <typename NodeType>
Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index, NodeType>>
  const bool overlapping) const
{
  compress();
  (void)communicator;

  if constexpr (running_in_debug_mode())
    {
      // For non-overlapping maps, the local sizes must add up to the
      // global size, i.e., the sets form a true partition.
      if (!overlapping)
        {
          const size_type n_global_elements =
            Utilities::MPI::sum(n_elements(), communicator);
          Assert(n_global_elements == size(),
                 ExcMessage("You are trying to create an Tpetra::Map object "
                            "that partitions elements of an index set "
                            "between processors. However, the union of the "
                            "index sets on different processors does not "
                            "contain all indices exactly once: the sum of "
                            "the number of entries the various processors "
                            "want to store locally is " +
                            std::to_string(n_global_elements) +
                            " whereas the total size of the object to be "
                            "allocated is " +
                            std::to_string(size()) +
                            ". In other words, there are "
                            "either indices that are not spoken for "
                            "by any processor, or there are indices that are "
                            "claimed by multiple processors."));
        }
    }

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear Tpetra::Map. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
      size(),
      n_elements(),
      0,
#  ifdef DEAL_II_WITH_MPI
      communicator)
#  else
      Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#  endif // DEAL_II_WITH_MPI
    );
  else
    {
      // General case: hand Tpetra the explicit list of locally owned
      // indices, converted to the signed integer type Tpetra expects.
      const std::vector<size_type> indices = get_index_vector();
      std::vector<types::signed_global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::signed_global_dof_index> arr_view(
        int_indices);

      Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
        size(),
        arr_view,
        0,
#  ifdef DEAL_II_WITH_MPI
        communicator)
#  else
        Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#  endif // DEAL_II_WITH_MPI
      );
    }
}
1044# endif
1045
1046
1047
// Build an Epetra_Map describing this IndexSet's partition of indices
// across the processes of 'communicator'. Uses the cheap linear
// constructor when the set is ascending and 1:1, and the general
// index-list constructor otherwise.
//
// NOTE(review): extraction dropped the signature line (presumably
// "IndexSet::make_trilinos_map(const MPI_Comm communicator,") and the
// "TrilinosWrappers::types::int_type(n_elements())," argument lines of
// both Epetra_Map constructor calls -- confirm against the repository.
Epetra_Map
  const bool overlapping) const
{
  compress();
  (void)communicator;

  if constexpr (running_in_debug_mode())
    {
      // For non-overlapping maps, the local sizes must add up to the
      // global size, i.e., the sets form a true partition.
      if (!overlapping)
        {
          const size_type n_global_elements =
            Utilities::MPI::sum(n_elements(), communicator);
          Assert(n_global_elements == size(),
                 ExcMessage("You are trying to create an Epetra_Map object "
                            "that partitions elements of an index set "
                            "between processors. However, the union of the "
                            "index sets on different processors does not "
                            "contain all indices exactly once: the sum of "
                            "the number of entries the various processors "
                            "want to store locally is " +
                            std::to_string(n_global_elements) +
                            " whereas the total size of the object to be "
                            "allocated is " +
                            std::to_string(size()) +
                            ". In other words, there are "
                            "either indices that are not spoken for "
                            "by any processor, or there are indices that are "
                            "claimed by multiple processors."));
        }
    }

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear EpetraMap. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);

  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      // General case: hand Epetra the explicit list of locally owned
      // indices (or a null pointer if this process owns none).
      const std::vector<size_type> indices = get_index_vector();
      return Epetra_Map(
        (n_elements() > 0 ?
           reinterpret_cast<const TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}
1114#endif
1115
1116
1117#ifdef DEAL_II_WITH_PETSC
1118IS
1119IndexSet::make_petsc_is(const MPI_Comm communicator) const
1120{
1121 std::vector<size_type> indices;
1122 fill_index_vector(indices);
1123
1124 // If the size of the index set can be converted to a PetscInt then every
1125 // value can also be converted
1126 AssertThrowIntegerConversion(static_cast<PetscInt>(size()), size());
1127 const auto local_size = static_cast<PetscInt>(n_elements());
1128 AssertIntegerConversion(local_size, n_elements());
1129
1130 size_type i = 0;
1131 std::vector<PetscInt> petsc_indices(n_elements());
1132 for (const auto &index : *this)
1133 {
1134 const auto petsc_index = static_cast<PetscInt>(index);
1135 AssertIntegerConversion(petsc_index, index);
1136 petsc_indices[i] = petsc_index;
1137 ++i;
1138 }
1139
1140 IS is;
1141 PetscErrorCode ierr = ISCreateGeneral(
1142 communicator, local_size, petsc_indices.data(), PETSC_COPY_VALUES, &is);
1143 AssertThrow(ierr == 0, ExcPETScError(ierr));
1144
1145 return is;
1146}
1147#endif
1148
1149
1150
// Determine, collectively over `communicator`, whether the union of the
// per-process IndexSets forms a partition of [0, size()) in which every
// process owns a single contiguous, globally ascending range (i.e. whether
// the distribution corresponds to a "linear" map).
//
// NOTE(review): the doxygen extraction dropped original source lines 1152
// (the first line of the signature) and 1175 (the else-branch of the
// ternary below -- presumably numbers::invalid_dof_index, used as the
// "I own nothing" sentinel). Confirm against the repository source.
1151bool
1153{
1154 // If the sum of local elements does not add up to the total size,
1155 // the IndexSet can't be complete.
1156 const size_type n_global_elements =
1157 Utilities::MPI::sum(n_elements(), communicator);
1158 if (n_global_elements != size())
1159 return false;
1160
 // An empty index space is trivially ascending and one-to-one.
1161 if (n_global_elements == 0)
1162 return true;
1163
1164#ifdef DEAL_II_WITH_MPI
1165 // Non-contiguous IndexSets can't be linear.
1166 const bool all_contiguous =
1167 (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
1168 if (!all_contiguous)
1169 return false;
1170
1171 bool is_globally_ascending = true;
1172 // we know that there is only one interval
 // First locally owned global index; processes owning no elements
 // report a sentinel (see NOTE above about the dropped line).
1173 types::global_dof_index first_local_dof = (n_elements() > 0) ?
1174 *(begin_intervals()->begin()) :
1176
 // Gather every process's first index on rank 0, which then checks
 // that the non-sentinel entries appear in strictly increasing order.
1177 const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
1178 const std::vector<types::global_dof_index> global_dofs =
1179 Utilities::MPI::gather(communicator, first_local_dof, 0);
1180
1181 if (my_rank == 0)
1182 {
1183 // find out if the received std::vector is ascending
 // Skip leading entries from processes that own nothing. NOTE(review):
 // if every entry were the sentinel this loop would run past the end,
 // but the n_global_elements == 0 early return above rules that out.
1184 types::global_dof_index index = 0;
1185 while (global_dofs[index] == numbers::invalid_dof_index)
1186 ++index;
1187 types::global_dof_index old_dof = global_dofs[index++];
1188 for (; index < global_dofs.size(); ++index)
1189 {
1190 const types::global_dof_index new_dof = global_dofs[index];
1191 if (new_dof != numbers::invalid_dof_index)
1192 {
1193 if (new_dof <= old_dof)
1194 {
1195 is_globally_ascending = false;
1196 break;
1197 }
1198 else
1199 old_dof = new_dof;
1200 }
1201 }
1202 }
1203
1204 // now broadcast the result
 // All processes must return the same answer, so rank 0 shares its verdict.
1205 int is_ascending = is_globally_ascending ? 1 : 0;
1206 int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
1207 AssertThrowMPI(ierr);
1208
1209 return (is_ascending == 1);
1210#else
 // Without MPI there is only one process, and the contiguity/size checks
 // above already passed.
1211 return true;
1212#endif // DEAL_II_WITH_MPI
1213}
1214
1215
1216
1217std::size_t
1225
1226// explicit template instantiations
1227
1228#ifndef DOXYGEN
1229# ifdef DEAL_II_WITH_TRILINOS
1230# ifdef DEAL_II_TRILINOS_WITH_TPETRA
1231
1232template IndexSet::IndexSet(
1233 const Teuchos::RCP<const Tpetra::Map<
1234 int,
1237 &);
1238
1239# if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
1240 defined(KOKKOS_ENABLE_SYCL)
1241template IndexSet::IndexSet(
1242 const Teuchos::RCP<const Tpetra::Map<
1243 int,
1246 &);
1247# endif
1248
1252 const MPI_Comm,
1253 bool) const;
1254
1255# if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
1256 defined(KOKKOS_ENABLE_SYCL)
1261 const MPI_Comm,
1262 bool) const;
1263# endif
1264
1265template Teuchos::RCP<
1269 const MPI_Comm,
1270 bool) const;
1271
1272# if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
1273 defined(KOKKOS_ENABLE_SYCL)
1274template Teuchos::RCP<
1278 const MPI_Comm,
1279 bool) const;
1280# endif
1281
1282# endif
1283# endif
1284#endif
1285
bool is_subset_of(const IndexSet &other) const
Definition index_set.cc:713
bool is_element_binary_search(const size_type local_index) const
Definition index_set.cc:808
size_type index_within_set_binary_search(const size_type global_index) const
Definition index_set.cc:861
size_type largest_range
Definition index_set.h:1140
IS make_petsc_is(const MPI_Comm communicator=MPI_COMM_WORLD) const
bool is_ascending_and_one_to_one(const MPI_Comm communicator) const
bool is_contiguous() const
Definition index_set.h:1932
Tpetra::Map< int, types::signed_global_dof_index, NodeType > make_tpetra_map(const MPI_Comm communicator=MPI_COMM_WORLD, const bool overlapping=false) const
Definition index_set.cc:964
void do_compress() const
Definition index_set.cc:143
ElementIterator at(const size_type global_index) const
Definition index_set.cc:883
size_type size() const
Definition index_set.h:1791
void fill_index_vector(std::vector< size_type > &indices) const
Definition index_set.cc:952
bool is_empty() const
Definition index_set.h:1941
std::vector< IndexSet > split_by_block(const std::vector< types::global_dof_index > &n_indices_per_block) const
Definition index_set.cc:450
size_type n_elements() const
Definition index_set.h:1949
void add_range_lower_bound(const Range &range)
Definition index_set.cc:601
ElementIterator begin() const
Definition index_set.h:1725
size_type pop_front()
Definition index_set.cc:579
void set_size(const size_type size)
Definition index_set.h:1779
void read(std::istream &in)
Definition index_set.cc:752
Epetra_Map make_trilinos_map(const MPI_Comm communicator=MPI_COMM_WORLD, const bool overlapping=false) const
IndexSet tensor_product(const IndexSet &other) const
Definition index_set.cc:549
void write(std::ostream &out) const
Definition index_set.cc:737
void block_read(std::istream &in)
Definition index_set.cc:788
bool is_compressed
Definition index_set.h:1123
void add_ranges_internal(boost::container::small_vector< std::pair< size_type, size_type >, 200 > &tmp_ranges, const bool ranges_are_sorted)
Definition index_set.cc:617
IntervalIterator begin_intervals() const
Definition index_set.h:1746
std::vector< Range > ranges
Definition index_set.h:1113
void subtract_set(const IndexSet &other)
Definition index_set.cc:480
ElementIterator end() const
Definition index_set.h:1737
Threads::Mutex compress_mutex
Definition index_set.h:1146
size_type index_space_size
Definition index_set.h:1129
void block_write(std::ostream &out) const
Definition index_set.cc:774
IndexSet get_view(const size_type begin, const size_type end) const
Definition index_set.cc:279
void add_range(const size_type begin, const size_type end)
Definition index_set.h:1818
std::size_t memory_consumption() const
size_type nth_index_in_set_binary_search(const size_type local_index) const
Definition index_set.cc:844
void compress() const
Definition index_set.h:1799
size_type pop_back()
Definition index_set.cc:561
std::vector< size_type > get_index_vector() const
Definition index_set.cc:933
Teuchos::RCP< Tpetra::Map< int, types::signed_global_dof_index, NodeType > > make_tpetra_map_rcp(const MPI_Comm communicator=MPI_COMM_WORLD, const bool overlapping=false) const
Definition index_set.cc:974
types::global_dof_index size_type
Definition index_set.h:85
void add_indices(const ForwardIterator &begin, const ForwardIterator &end)
Definition index_set.h:1846
IndexSet operator&(const IndexSet &is) const
#define DEAL_II_NAMESPACE_OPEN
Definition config.h:40
#define DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
Definition config.h:603
constexpr bool running_in_debug_mode()
Definition config.h:78
#define DEAL_II_NAMESPACE_CLOSE
Definition config.h:41
#define DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
Definition config.h:647
#define AssertIntegerConversion(index1, index2)
#define DEAL_II_ASSERT_UNREACHABLE()
#define AssertThrowIntegerConversion(index1, index2)
static ::ExceptionBase & ExcIO()
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
#define AssertThrowMPI(error_code)
#define AssertIndexRange(index, range)
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
Tpetra::KokkosCompat::KokkosDeviceWrapperNode< typename MemorySpace::kokkos_space::execution_space, typename MemorySpace::kokkos_space > NodeType
Tpetra::Map< LO, GO, NodeType< MemorySpace > > MapType
std::enable_if_t< std::is_fundamental_v< T >, std::size_t > memory_consumption(const T &t)
T sum(const T &t, const MPI_Comm mpi_communicator)
T min(const T &t, const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
Definition mpi.cc:120
std::vector< T > gather(const MPI_Comm comm, const T &object_to_send, const unsigned int root_process=0)
Teuchos::RCP< T > make_rcp(Args &&...args)
Iterator lower_bound(Iterator first, Iterator last, const T &val)
Definition utilities.h:1033
constexpr types::global_dof_index invalid_dof_index
Definition types.h:269
::VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
int signed_global_dof_index
Definition types.h:105
unsigned int global_dof_index
Definition types.h:94
static bool end_compare(const IndexSet::Range &x, const IndexSet::Range &y)
Definition index_set.h:1071
static bool nth_index_compare(const IndexSet::Range &x, const IndexSet::Range &y)
Definition index_set.h:1077
size_type end
Definition index_set.h:1039
size_type nth_index_in_set
Definition index_set.h:1041
size_type begin
Definition index_set.h:1038