-
Notifications
You must be signed in to change notification settings - Fork 0
/
nedelec_project_impl.h
1225 lines (1074 loc) · 43.3 KB
/
nedelec_project_impl.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 2013-2014: G-CSC, Goethe University Frankfurt
* Author: Dmitry Logashenko
*
* This file is part of UG4.
*
* UG4 is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 3 (as published by the
* Free Software Foundation) with the following additional attribution
* requirements (according to LGPL/GPL v3 §7):
*
* (1) The following notice must be displayed in the Appropriate Legal Notices
* of covered and combined works: "Based on UG4 (www.ug4.org/license)".
*
* (2) The following notice must be displayed at a prominent place in the
* terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
*
* (3) The following bibliography is recommended for citation and must be
* preserved in all covered files:
* "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
* parallel geometric multigrid solver on hierarchically distributed grids.
* Computing and visualization in science 16, 4 (2013), 151-164"
* "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
* flexible software system for simulating pde based models on high performance
* computers. Computing and visualization in science 16, 4 (2013), 165-179"
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
/**
* nedelec_project_impl.h - implementation of class members of the classes for
* the projection of the functions based on the Nedelec element to the space of
* the divergence-free functions.
*/
#include "lib_disc/spatial_disc/disc_util/fe_geom.h"
#include "lib_disc/spatial_disc/disc_util/geom_provider.h"
#include "lib_disc/local_finite_element/lagrange/lagrange.h"
#include "lib_disc/quadrature/gauss/gauss_quad.h"
#include "nedelec_local_ass.h"
namespace ug{
namespace Electromagnetism{
/**
* Class constructor:
*/
/// Class constructor: stores the tool objects and composes the discretization
/// of the auxiliary (vertex-centered Laplace) problem used for the projection.
/// @throws UGError if any of the arguments is invalid or the vertex-centered
///         approximation space does not contain exactly one everywhere-defined function.
template <typename TDomain, typename TAlgebra>
NedelecProject<TDomain, TAlgebra>::NedelecProject
(
	SmartPtr<EMaterial<TDomain> > emMatherial, ///< properties of the materials
	SmartPtr<ApproximationSpace<TDomain> > vertApproxSpace, ///< vertex-centered approx. space
	SmartPtr<ILinearOperatorInverse<pot_vector_type> > potSolver ///< linear solver for the potential
)
:	m_spEmMaterial (emMatherial),
	m_spVertApproxSpace (vertApproxSpace),
	m_bDampDVFs (true), // damping of the Dirichlet vector fields is on by default
	m_auxLocLaplace (new AuxLaplaceLocAss (*this)),
	m_auxLaplaceRHS (new AuxLaplaceRHS (*this)),
	m_auxLaplaceAss (new DomainDiscretization<TDomain, TPotAlgebra> (vertApproxSpace)),
	m_auxLaplaceOp (new AssembledLinearOperator<TPotAlgebra> (SmartPtr<IAssemble<TPotAlgebra> >(m_auxLaplaceAss))),
	m_potSolver (potSolver)
{
	// Check the parameters:
	if (m_spEmMaterial.invalid ())
		UG_THROW ("NedelecProject: Object of the material properties not specified.");
	if (m_spVertApproxSpace.invalid ())
		UG_THROW ("NedelecProject: Illegal vert.-centered approx. space.");
	if (m_spVertApproxSpace->num_fct () != 1)
		UG_THROW ("NedelecProject: Exactly one function should be defined in the vert.-centered approx. space.");
	if (! m_spVertApproxSpace->is_def_everywhere (0))
		UG_THROW ("NedelecProject: The function in the vert.-centered approx. space must be defined everywhere.");
	if (m_potSolver.invalid ())
		UG_THROW ("NedelecProject: Illegal solver for the auxiliary problems.");

	// Compose the global discretization of the auxiliary equations:
	m_auxLaplaceAss->add (SmartPtr<IElemDisc<TDomain> >(m_auxLocLaplace));
	m_auxLaplaceAss->add
		(SmartPtr<IDomainConstraint<TDomain, TPotAlgebra> >(m_auxLaplaceRHS));
}
/**
* Performs the projection of all functions in a grid function
*/
/// Performs the projection of all functions stored in a grid function.
/// The vector must actually be a GridFunction; otherwise an exception is thrown.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::apply
(
	vector_type & vec ///< the grid function to project
)
{
	typedef GridFunction<TDomain, TAlgebra> gf_type;

	// The projection needs the grid structure: plain vectors are not accepted.
	gf_type * pGF = dynamic_cast<gf_type *> (&vec);
	if (pGF == 0)
		UG_THROW ("NedelecProject::apply: Specify a grid function, not a vector!");

	// Collect every function defined in the grid function and project them all:
	FunctionGroup allFct (pGF->dof_distribution_info ());
	allFct.add_all ();
	apply (*pGF, allFct);
}
/**
* Performs the projection of given functions
*/
/// Performs the projection of the functions given by their names.
/// Resolves the names in the edge-centered approximation space of u and
/// delegates to the FunctionGroup-based overload.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::apply
(
	GridFunction<TDomain, TAlgebra> & u, ///< the grid function to project
	const char * fct_names ///< the function name
)
{
	// Resolve the function names to their indices in the approx. space of u:
	FunctionGroup fctGrp;
	try
	{
		fctGrp = u.fct_grp_by_name (fct_names);
	}
	UG_CATCH_THROW ("NedelecProject: Functions '" << fct_names << "' not all contained in the edge approximation space.");

	// Project:
	apply (u, fctGrp);
}
/**
* Performs the projection of given functions
*/
/// Performs the projection of the given functions (identified by their indices).
/// Sets up and solves the auxiliary vertex-centered problem for every function,
/// optionally damping the Dirichlet vector fields (DVFs) of the conductors.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::apply
(
	GridFunction<TDomain, TAlgebra> & u, ///< the grid function to project
	const FunctionGroup & fctGrp ///< the function indices
)
{
	// Check the data: the material structure must be ready before the projection.
	if (! m_spEmMaterial->finalized ())
		UG_THROW ("NedelecProject: The material data structure has not been finalized.");

	// Get the domain and verify that both approx. spaces live on the same one:
	SmartPtr<domain_type> domain = u.domain ();
	if (domain.get () != m_spVertApproxSpace->domain().get ())
		UG_THROW ("NedelecProject: The approximation spaces are based on different domains.");

	// Check the function indices: only Nedelec-element DoFs can be projected.
	for (size_t i_fct = 0; i_fct < fctGrp.size (); i_fct++)
		if (u.local_finite_element_id(fctGrp[i_fct]).type () != LFEID::NEDELEC)
			UG_THROW ("NedelecProject: Not a Nedelec-element-based grid function specified for the projection.");

	// Get the DoF distributions (edge-centered for u, vertex-centered for the potential):
	SmartPtr<DoFDistribution> edgeDD = u.dof_distribution ();
	SmartPtr<DoFDistribution> vertDD = m_spVertApproxSpace->dof_distribution (u.grid_level ());

	// Create temporary grid functions for the auxiliary problem
	pot_gf_type aux_rhs (m_spVertApproxSpace, vertDD);
	pot_gf_type aux_cor (m_spVertApproxSpace, vertDD);

	// Assemble the matrix of the auxiliary problem (on the grid level of u):
	aux_cor.set (0.0);
	m_auxLaplaceOp->set_level (u.grid_level ());
	m_auxLaplaceOp->init (aux_cor);

	// Initialize the solver:
	m_potSolver->init (m_auxLaplaceOp);

	// Compute the Dirichlet vector fields (released again below):
	if (m_bDampDVFs)
	{
		alloc_DVFs (* domain.get (), aux_rhs);
		compute_DVFs (aux_rhs);
		compute_DVF_potential_coeffs (* domain.get (), * vertDD.get ());
	}

	// Project every function:
	for (size_t i_fct = 0; i_fct < fctGrp.size (); i_fct++)
		project_func (domain, edgeDD, u, fctGrp[i_fct], vertDD, aux_rhs, aux_cor);

	// Release the Dirichlet vector fields (raw pointers allocated in alloc_DVFs).
	// NOTE(review): if project_func throws, these pointers leak — consider RAII.
	if (m_bDampDVFs)
	{
		for (size_t i = 0; i < m_DVF_phi.size (); i++)
			delete m_DVF_phi [i];
		m_DVF_phi.resize (0);
	}
}
/**
* Computes weak divergence in insulators and saves it in a vertex-centered grid function
*/
/// Computes the weak divergence of a Nedelec-based vector field in insulators
/// and saves it in a vertex-centered grid function. Inside conductors the
/// divergence entries are cleared by assemble_div.
/// @throws UGError on invalid grid functions, non-finalized material data,
///         domain mismatch, unknown function name or a non-Nedelec function.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::compute_div
(
	SmartPtr<GridFunction<TDomain, TAlgebra> > sp_u, ///< [in] the vector field grid function
	const char * u_fct_name, ///< [in] the function name of the Nedelec DoFs
	SmartPtr<GridFunction<TDomain, TPotAlgebra> > sp_div ///< [out] the grid function for the divergence
)
{
	// Check the data:
	if (sp_u.invalid ())
		UG_THROW ("NedelecProject: Illegal input grid function specification.");
	GridFunction<TDomain, TAlgebra> & u = * sp_u.get ();

	if (sp_div.invalid ())
		UG_THROW ("NedelecProject: Illegal output grid function specification.");
	pot_vector_type & div = * sp_div.get ();

	if (! m_spEmMaterial->finalized ())
		UG_THROW ("The material data structure has not been finalized.");

	// Get the domain and verify that both approx. spaces live on the same one:
	SmartPtr<domain_type> domain = u.domain ();
	if (domain.get () != m_spVertApproxSpace->domain().get ())
		UG_THROW ("NedelecProject: The approximation spaces are based on different domains.");

	// Get the function index of the vector field:
	size_t u_fct;
	try
	{
		u_fct = u.fct_id_by_name (u_fct_name);
	}
	UG_CATCH_THROW (" Function '" << u_fct_name << "' not contained in the edge approximation space.");
	if (u.local_finite_element_id(u_fct).type () != LFEID::NEDELEC)
		UG_THROW ("NedelecProject: Not a Nedelec-element-based input grid function.");

	// Get the DoF distributions; the output must live on the vertex-centered
	// DoF distribution of the same grid level:
	SmartPtr<DoFDistribution> edgeDD = u.dof_distribution ();
	SmartPtr<DoFDistribution> vertDD = sp_div->dof_distribution ();
	if (vertDD.get () != m_spVertApproxSpace->dof_distribution(u.grid_level ()).get ())
		UG_THROW ("NedelecProject: DoFDistribution mismatch, illegal output grid function.");

	// Compute the weak divergence (the conductor charges are computed, too,
	// but not used by this function):
	DenseVector<VariableArray1<number> > charge;
	assemble_div (* domain.get(), * edgeDD.get(), u, u_fct, * vertDD.get(), div, charge);
#	ifdef UG_PARALLEL
	div.change_storage_type (PST_CONSISTENT);
#	endif
}
/**
* Projects one function, thus performs the main action of the projection.
* This function assumes that the object is completely initialized, i.e.
* the solver and the discretization are initialized.
*/
/// Projects one function: computes the weak divergence, solves the auxiliary
/// vertex-centered problem for the potential correction, optionally damps the
/// Dirichlet vector fields and subtracts the gradient of the correction from u.
/// Assumes the object is completely initialized (solver and discretization).
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::project_func
(
	const SmartPtr<TDomain> & domain, ///< [in] the domain
	const SmartPtr<DoFDistribution> & edgeDD, ///< [in] the edge DD
	vector_type & u, ///< [in] the vector where to project
	size_t fct, ///< [in] function in u to compute div for
	const SmartPtr<DoFDistribution> & vertDD, ///< [in] the vertex DD
	pot_gf_type & aux_rhs, ///< [in] a grid function for the rhs of the aux. problem
	pot_gf_type & aux_cor ///< [in] a grid function for the sol. of the aux. problem
)
{
	DenseVector<VariableArray1<number> > charge; // charges of the base conductors

	//--- Compute the correction due to the divergence:

#	ifdef UG_PARALLEL
	aux_cor.set_storage_type (PST_CONSISTENT);
#	endif
	aux_cor.set (0.0);

	// 1. Compute the weak div (becomes the rhs of the auxiliary problem):
	assemble_div (* domain.get(), * edgeDD.get(), u, fct, * vertDD.get(), aux_rhs, charge);

	// 2. Correct the divergence in the right-hand side
	// (base conductor -1: see AuxLaplaceRHS::set_base_conductor for the semantics):
	m_auxLaplaceRHS->set_base_conductor (-1);
	m_auxLaplaceAss->adjust_solution (aux_rhs, vertDD);

	// 3. Solve the auxiliary system:
	m_auxLaplaceAss->adjust_solution (aux_cor, vertDD);
	m_potSolver->apply (aux_cor, aux_rhs);

	// 4. Damp the Dirichlet vector fields:
	if (m_bDampDVFs)
		damp_DVFs (aux_cor, charge);

	// 5. Merge the correction into the solution (subtract its gradient from u):
	distribute_cor (* domain.get(), * edgeDD.get(), u, fct, * vertDD.get(), aux_cor);
}
/**
* Assembles the weak divergence operator for all elements of the same type.
* Remark: Only full-dimensional elements should be considered.
*/
/// Assembles the weak divergence operator for all elements of the same type.
/// Only subsets marked as insulators (base conductor index -1) are visited.
/// Remark: Only full-dimensional elements should be considered.
template <typename TDomain, typename TAlgebra>
template <typename TElem>
void NedelecProject<TDomain, TAlgebra>::weak_div_elem_type
(
	const TDomain & domain, ///< [in] the domain
	const DoFDistribution & edgeDD, ///< [in] the edge DD
	const vector_type & u, ///< [in] the vector to compute div for
	size_t fct, ///< [in] function in u to compute div for
	const DoFDistribution & vertDD, ///< [in] the vertex DD
	pot_vector_type & div ///< to update the weak divergence of u
)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;
	typedef typename DoFDistribution::traits<TElem>::const_iterator iterator;

	// Get the conductor distribution and the positions of the grid points:
	const std::vector<int> & cond_index = m_spEmMaterial->base_conductor_index ();
	const typename TDomain::position_accessor_type & aaPos = domain.position_accessor ();

	// Arrays for the indices in the vectors:
	std::vector<DoFIndex> vEdgeInd (ref_elem_type::numEdges);
	std::vector<size_t> vVertInd (ref_elem_type::numCorners);

	// Loop over all subsets representing insulators:
	for (int si = 0; si < vertDD.num_subsets (); si++)
		if (cond_index [si] == -1)
		{
			// Loop over all the elements of the given type in the subset
			iterator e_end = vertDD.template end<TElem> (si);
			for (iterator elem_iter = vertDD.template begin<TElem> (si);
				elem_iter != e_end; ++elem_iter)
			{
				TElem * pElem = *elem_iter;
				MathMatrix<ref_elem_type::numCorners, ref_elem_type::numEdges> B;
				MathVector<ref_elem_type::numEdges> loc_u;
				MathVector<ref_elem_type::numCorners> loc_div;

				// Compute the local weak divergence matrix
				// (B's storage is reinterpreted as a plain 2d array for the local assembler):
				position_type aCorners [ref_elem_type::numCorners];
				for (size_t co = 0; co < (size_t) ref_elem_type::numCorners; co++)
					aCorners [co] = aaPos [pElem->vertex (co)];
				NedelecT1_LDisc<TDomain, TElem>::local_div_matrix (&domain, pElem, aCorners,
					(number (*) [ref_elem_type::numEdges]) & (B (0,0)));

				// Compute the local contribution to the weak divergence: loc_div = B * loc_u
				if (edgeDD.dof_indices (pElem, fct, vEdgeInd) != (size_t) ref_elem_type::numEdges)
					UG_THROW ("NedelecProject: Edge DoF distribution mismatch. Not the Nedelec-Type-1 element?");
				for (size_t i = 0; i < (size_t) ref_elem_type::numEdges; i++)
					loc_u [i] = DoFRef (u, vEdgeInd [i]);
				MatVecMult (loc_div, B, loc_u);

				// Add the local contribution to the global vector:
				if (vertDD.algebra_indices (pElem, vVertInd) != (size_t) ref_elem_type::numCorners)
					UG_THROW ("NedelecProject: Vertex DoF distribution mismatch. Not the Lagrange-Order-1 element?");
				for (size_t i = 0; i < (size_t) ref_elem_type::numCorners; i++)
					div [vVertInd [i]] += loc_div [i];
			}
		}
}
/**
* Sets the div operator to 0 in conductors for all elements of the same type
* and computes the charges of the base conductors. The charges of the base
* conductors are the sums of the divergences in all the interface vertices
* between the base conductors and the insulators. As the divergence inside
* of the conductors is set to zero, we sum over all the nodes in the closure
* of the conductors.
* Remark: Only full-dimensional elements should be considered.
*/
/// Sets the weak divergence to 0 in conductors (for one element type) and
/// accumulates the cleared values into the charges of the base conductors.
/// The charge of a base conductor is the sum of the divergence values in all
/// the vertices of the closure of that conductor.
/// Remark: Only full-dimensional elements should be considered.
template <typename TDomain, typename TAlgebra>
template <typename TElem>
void NedelecProject<TDomain, TAlgebra>::clear_div_in_conductors
(
	const DoFDistribution & vertDD, ///< [in] the vertex DD
	pot_vector_type & div, ///< [out] to update the weak divergence of u
	DenseVector<VariableArray1<number> > & charge ///< [out] charges of the conductors
)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;
	typedef typename DoFDistribution::traits<TElem>::const_iterator iterator;

	// Nothing to do if there are no conductors at all:
	const std::vector<int> & base_cond = m_spEmMaterial->base_conductors ();
	if (base_cond.size () == 0) return;

	const std::vector<int> & base_cond_index = m_spEmMaterial->base_conductor_index ();
	std::vector<size_t> corner_ind (ref_elem_type::numCorners);

	// Visit every subset that belongs to a conductor:
	for (int si = 0; si < vertDD.num_subsets (); si++)
	{
		const int ci = base_cond_index [si];
		if (ci < 0) continue; // not a conductor

		number & charge_sum = charge [ci];

		// Move the divergence entries of every element of this type into the charge:
		iterator elem_end = vertDD.template end<TElem> (si);
		for (iterator it = vertDD.template begin<TElem> (si); it != elem_end; ++it)
		{
			TElem * elem = *it;
			if (vertDD.algebra_indices (elem, corner_ind) != (size_t) ref_elem_type::numCorners)
				UG_THROW ("NedelecProject: Vertex DoF distribution mismatch. Not the Lagrange-Order-1 element?");
			for (size_t co = 0; co < (size_t) ref_elem_type::numCorners; co++)
			{
				number & div_val = div [corner_ind [co]];
				charge_sum += div_val;
				div_val = 0;
			}
		}
	}
}
/**
* Assembles the weak divergence operator on the whole grid and computes the
* charges of the base conductors.
*/
/// Assembles the weak divergence operator on the whole grid (all full-dim.
/// element types) and computes the charges of the base conductors. In the
/// closure of the conductors the divergence entries are cleared (moved into
/// the charges). In parallel builds the charges are summed over the processes.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::assemble_div
(
	const TDomain & domain, ///< [in] the domain
	const DoFDistribution & edgeDD, ///< [in] the edge DD
	const vector_type & u, ///< [in] the vector to compute div for
	size_t fct, ///< [in] function in u to compute div for
	const DoFDistribution & vertDD, ///< [in] the vertex DD
	pot_vector_type & div, ///< [out] for the weak divergence of u
	DenseVector<VariableArray1<number> > & charge ///< [out] charges of the conductors
)
{
	// The full-dim. grid element types for this dimension:
	typedef typename domain_traits<WDim>::DimElemList ElemList;

	// One charge entry per base conductor, initialized to zero:
	const std::vector<int> & base_cond = m_spEmMaterial->base_conductors ();
	charge.resize (base_cond.size ());
	if (charge.size () != 0)
		charge = 0.0;

#	ifdef UG_PARALLEL
	div.set_storage_type (PST_ADDITIVE);
#	endif
	div.set (0.0);

	// Compute the divergence for all the types of the elements:
	boost::mpl::for_each<ElemList> (WeakDiv (this, domain, edgeDD, u, fct, vertDD, div));

	// Clear the entries at all the points in the closure of the conductors:
	boost::mpl::for_each<ElemList> (ClearDivInConductors (this, vertDD, div, charge));

#ifdef UG_PARALLEL
	// Sum up the charges over the processes
	if (charge.size () != 0)
	{
		pcl::ProcessCommunicator proc_comm;
		DenseVector<VariableArray1<number> > sum;
		sum.resize (charge.size ());
		proc_comm.allreduce (&(charge.at(0)), &(sum.at(0)), charge.size (), PCL_RO_SUM);
		charge = sum;
	}
#endif
}
/**
* Computes the edge-centered correction from the vertex-centered (potential)
* one by applying the gradient operator.
*
* Note that the edge-centered correction is considered only in the closure
* of insulators. There, it is the gradient of the vertex-centered (potential)
* correction. But in the conductors (as well as on the Dirichlet boundaries),
* the vertex-centered correction is constant by construction, so that its
* gradient is 0 anyway. (Adjacent conductors are considered as one conductor
* in the projection, so that the different factors for the charges in different
* conductors do not violate the constant value of the vertex-centered
* correction in any separated piece of conductors.) Therefore, the function
* computes the gradient over the whole domain.
*/
/// Computes the edge-centered correction from the vertex-centered (potential)
/// one by applying the gradient operator: for every edge, the difference of
/// the potential at its end points is subtracted from the edge DoF of u.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::distribute_cor
(
	const TDomain & domain, ///< [in] the domain
	const DoFDistribution & edgeDD, ///< [in] the edge DD
	vector_type & u, ///< [out] the vector to correct (to update)
	size_t fct, ///< [in] function in u to update
	const DoFDistribution & vertDD, ///< [in] the vertex DD
	const pot_vector_type & cor ///< [in] the potential correction for u
)
{
	// Iterator over edges
	typedef DoFDistribution::traits<Edge>::const_iterator t_edge_iterator;

#ifdef UG_PARALLEL
	if (! u.has_storage_type (PST_CONSISTENT))
		UG_THROW ("Consistent storage type is expected for the grid function to project.")
#endif

	// Arrays for the indices in the vectors:
	std::vector<size_t> vVertInd (1);
	std::vector<DoFIndex> vEdgeInd (1);

	// Subtract the gradient of the potential correction from every edge DoF:
	t_edge_iterator edge_end = edgeDD.end<Edge> ();
	for (t_edge_iterator edge_iter = edgeDD.begin<Edge> (); edge_iter != edge_end; ++edge_iter)
	{
		Edge * edge = *edge_iter;
		number pot [2];

		// Read the potential correction at both ends of the edge:
		for (size_t i = 0; i < 2; i++)
		{
			if (vertDD.inner_algebra_indices (edge->vertex(i), vVertInd) != 1)
				UG_THROW ("NedelecProject:"
					"More than one DoF per vertex for the auxiliary systems.");
			pot [i] = cor [vVertInd[0]];
		}

		// Subtract the gradient (the potential difference along the edge):
		if (edgeDD.inner_dof_indices (edge, fct, vEdgeInd) != 1)
			UG_THROW ("NedelecProject:"
				"More than one DoF per edge. Not the Nedelec-Type-1 element?");
		DoFRef (u, vEdgeInd[0]) -= pot [1] - pot [0];
	}
}
/**
* Allocates memory for the DVFs associated with the conductors that
* do not touch the Dirichlet boundary
*/
/// Allocates memory for the Dirichlet vector fields (DVFs) associated with the
/// conductors. Conductors touching the Dirichlet boundary are considered
/// grounded and get no DVF (a NULL entry in m_DVF_phi instead).
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::alloc_DVFs
(
	const TDomain & domain, ///< [in] the domain
	pot_gf_type & aux_rhs ///< [in] the grid function of the auxiliary rhs
)
{
	const DoFDistribution * vertDD = aux_rhs.dof_distribution().get ();
	const std::vector<int> & cond_index = m_spEmMaterial->base_conductor_index ();
	const std::vector<int> & base_cond = m_spEmMaterial->base_conductors ();
	size_t n_cond = base_cond.size ();
	std::vector<char> grounded (n_cond); // a logical array; do not replace char->bool to avoid problems with MPI!
	for (size_t i = 0; i < n_cond; i++) grounded [i] = 0; // false

	// Exclude grounded conductors: mark every base conductor whose closure
	// touches a Dirichlet boundary edge.
	if (m_spDirichlet.valid ())
	{
		typedef DoFDistribution::traits<Edge>::const_iterator t_edge_iterator;

		const grid_type * grid = domain.grid().get ();
		const subset_handler_type * ss_handler = domain.subset_handler().get ();
		Grid::volume_traits::secure_container volume_list;

		SubsetGroup dirichlet_ssgrp (domain.subset_handler());
		m_spDirichlet->get_dirichlet_subsets (dirichlet_ssgrp);

		// Loop the Dirichlet subsets
		for (size_t j = 0; j < dirichlet_ssgrp.size (); j++)
		{
			int si = dirichlet_ssgrp [j];

			// Loop the Dirichlet edges in the subset
			//TODO: Requires a parallelization!
			t_edge_iterator iterEnd = vertDD->end<Edge> (si);
			for (t_edge_iterator iter = vertDD->begin<Edge> (si); iter != iterEnd; iter++)
			{
				// Loop the adjacent volumes; a conductive volume touching a
				// Dirichlet edge grounds its base conductor.
				((grid_type*) grid)->associated_elements (volume_list, *iter);
				for (size_t v = 0; v < volume_list.size (); v++)
				{
					int v_b_cnd;
					if ((v_b_cnd = cond_index [ss_handler->get_subset_index (volume_list [v])]) < 0)
						continue; // not a conductor
					grounded [v_b_cnd] = 1; // true
				}
			}
		}

#		ifdef UG_PARALLEL
		/* The base conductors are well-defined over the parallel
		 * architecture. For that, we only need to summarize the flags.
		 */
		pcl::ProcessCommunicator proc_comm;
		std::vector<char> loc_grounded = grounded;
		proc_comm.allreduce (loc_grounded, grounded, PCL_RO_LOR);
#		endif
	}

	// Allocate memory for the conductors (NULL marks a grounded conductor):
	m_DVF_phi.resize (n_cond);
	for (size_t i = 0; i < n_cond; i++)
		if (grounded [i])
			m_DVF_phi [i] = NULL; // the conductor is grounded, skip it
		else
			m_DVF_phi [i] = new pot_gf_type (aux_rhs.approx_space (), aux_rhs.dof_distribution ());
}
/**
* Computes the Dirichlet vector fields
*/
/// Computes the Dirichlet vector fields: for every non-grounded base conductor,
/// solves the auxiliary problem with the rhs composed for that conductor.
/// Grounded conductors (NULL entries in m_DVF_phi) are skipped.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::compute_DVFs
(
	pot_gf_type & aux_rhs ///< grid function for the auxiliary rhs
)
{
	for (size_t i = 0; i < m_DVF_phi.size (); i++)
	{
		pot_gf_type * phi = m_DVF_phi [i];
		if (phi == NULL) continue; // a grounded conductor

		// 1. Compose the right-hand side for base conductor i:
		m_auxLaplaceRHS->set_base_conductor (i);
#		ifdef UG_PARALLEL
		aux_rhs.set_storage_type (PST_ADDITIVE);
#		endif
		aux_rhs.set (0.0);
		m_auxLaplaceAss->adjust_solution (aux_rhs, aux_rhs.dof_distribution ());

		// 2. Solve the auxiliary system (starting from zero):
#		ifdef UG_PARALLEL
		phi->set_storage_type (PST_CONSISTENT);
#		endif
		phi->set (0.0);
		m_auxLaplaceAss->adjust_solution (*phi, phi->dof_distribution ());
		m_potSolver->apply (*phi, aux_rhs);
	}
}
/**
* Computes the potential coefficients
*/
/// Computes the potential coefficients of the base conductors: assembles the
/// capacity matrix (lower triangular part, then symmetrized) and inverts it.
/// Grounded conductors get an identity row/column. Uses a temporary vertex
/// attachment to mark the base conductor index of every vertex.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::compute_DVF_potential_coeffs
(
	const TDomain & domain, ///< [in] the domain
	DoFDistribution & vertDD ///< [in] the vertex DD
)
{
	// The full-dim. grid element types for this dimension:
	typedef typename domain_traits<WDim>::DimElemList ElemList;

	// Prepare the matrix for the potential coefficients:
	size_t num_b_cond = m_DVF_phi.size ();
	m_potCoeff.resize (num_b_cond, num_b_cond, false);
	if (num_b_cond <= 0) return; // nothing to do: no conductors

	// Get the base conductor indices at vertices (set -2 for insulators, -1 for grounded conductors)
	SmartPtr<MultiGrid> sp_mg = vertDD.multi_grid ();
	a_vert_cond_type a_vert_base_cond;
	sp_mg->attach_to_vertices (a_vert_base_cond);
	aa_vert_cond_type vert_base_cond (*sp_mg, a_vert_base_cond);
	SetAttachmentValues (vert_base_cond, vertDD.begin<Vertex>(), vertDD.end<Vertex>(), -2);

	boost::mpl::for_each<ElemList> (MarkCondVert (this, vertDD, vert_base_cond));
#	ifdef UG_PARALLEL
	// Conductor indices dominate over the default -2: take the maximum.
	AttachmentAllReduce<Vertex> (*sp_mg, a_vert_base_cond, PCL_RO_MAX);
#	endif

	// Compute the capacity matrix
	m_potCoeff = 0.0;
	boost::mpl::for_each<ElemList> (IntegrateDivDVF
		(this, domain, vertDD, vert_base_cond, m_potCoeff));
	sp_mg->detach_from_vertices (a_vert_base_cond); // we do not need the attachment any more
#	ifdef UG_PARALLEL
	{
		// Only the lower triangular part is assembled, but for simplicity, we
		// reduce the whole matrix (as it should be relatively small)
		pcl::ProcessCommunicator proc_comm;
		DenseMatrix<VariableArray2<number> > sum;
		sum.resize (m_potCoeff.num_rows (), m_potCoeff.num_cols (), false);
		proc_comm.allreduce (&(m_potCoeff.at(0,0)), &(sum.at(0,0)),
			m_potCoeff.num_rows () * m_potCoeff.num_cols (), PCL_RO_SUM);
		m_potCoeff = sum;
	}
#	endif
	for (size_t i = 0; i < num_b_cond; i++)
		if (m_DVF_phi [i] == NULL)
			m_potCoeff (i, i) = 1; // set the matrix to identity for the grounded conductors
		else
			for (size_t j = i + 1; j < num_b_cond; j++)
				m_potCoeff (i, j) = m_potCoeff (j, i); // use the symmetry in the lower triangular part

	// Invert the capacity matrix to get the potential coefficients
	Invert (m_potCoeff);
}
/**
* Damps the Dirichlet vector fields (DVFs)
*/
/// Damps the Dirichlet vector fields (DVFs): adds every DVF to the potential
/// correction, weighted with the potential coefficients times the charges.
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::damp_DVFs
(
	pot_vector_type & cor, ///< the potential correction to update
	const DenseVector<VariableArray1<number> > & charge ///< [in] charges of the conductors
)
{
	// Weights of the DVFs: potential coefficients applied to the charges
	DenseVector<VariableArray1<number> > weight = m_potCoeff * charge;

	size_t n_cond = m_DVF_phi.size ();
	for (size_t i = 0; i < n_cond; i++)
	{
		pot_gf_type * phi = m_DVF_phi [i];
		if (phi == NULL) continue; // skip grounded conductors
		VecScaleAdd (cor, 1.0, cor, weight [i], * (pot_vector_type *) phi);
	}
}
/**
* Initializes 'vert_base_cond' with the indices of the base conductors
* according to the closure of the conductive subsets. Note that the insulators
* are marked with -2 (and NOT with -1!), whereas the grounded conductors with
* -1 and the other conductors with their base conductor index. The attachment
* must be preinitialized with the default value -2 (for the insulators).
*/
/// Initializes 'vert_base_cond' with the indices of the base conductors
/// according to the closure of the conductive subsets (for one element type).
/// Insulators keep the preinitialized default -2, grounded conductors are
/// marked with -1, other conductors with their base conductor index.
template <typename TDomain, typename TAlgebra>
template <typename TElem>
void NedelecProject<TDomain, TAlgebra>::mark_cond_vert_elem_type
(
	DoFDistribution & vertDD, ///< [in] the vertex DD
	aa_vert_cond_type & vert_base_cond ///< [out] indices of the base conductors for every vertex
)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;
	typedef typename DoFDistribution::traits<TElem>::const_iterator iterator;

	int base_cond_ind;

	// Get the base conductor index of every subset:
	const std::vector<int> & cond_index = m_spEmMaterial->base_conductor_index ();

	// (the formerly declared vVertInd vector was never used and has been removed)

	// Loop over all subsets representing conductors:
	for (int si = 0; si < vertDD.num_subsets (); si++)
		if ((base_cond_ind = cond_index [si]) >= 0)
		{
			// Mark the grounded conductors with -1 (insulators are marked with -2)
			if (m_DVF_phi [base_cond_ind] == NULL)
				base_cond_ind = -1;

			// Mark all the corners of all the elements of the given type in the subset:
			iterator e_end = vertDD.template end<TElem> (si);
			for (iterator elem_iter = vertDD.template begin<TElem> (si);
				elem_iter != e_end; ++elem_iter)
			{
				TElem * pElem = *elem_iter;
				for (size_t co = 0; co < (size_t) ref_elem_type::numCorners; co++)
					vert_base_cond [pElem->vertex (co)] = base_cond_ind;
			}
		}
}
/**
 * Integration of div E over boundaries of conductors (for one type of elements)
 * to get the capacity matrix. We compute only the lower triangular part of
 * the capacity matrix: This matrix is symmetric.
 *
 * Entry C(i,j) accumulates the charge induced in base conductor i by the
 * potential field phi(j) associated with base conductor j (cf. the comment
 * inside the loop below). Only elements lying in INSULATOR subsets are
 * visited, so the integration runs over the insulator side of the conductor
 * boundaries.
 *
 * Conventions of vert_base_cond (cf. the marking loop preceding this
 * function): index >= 0 is a base conductor, -1 is a grounded conductor,
 * and vertices not touching any conductor keep the "insulator" mark -2.
 *
 * \tparam TElem  type of the full-dimensional grid elements looped here
 * \param domain          [in]  the domain (provides the corner coordinates)
 * \param vertDD          [in]  the vertex-centered DoF distribution
 * \param vert_base_cond  [in]  indices of the base conductors for every vertex
 * \param C               [out] the capacity matrix to update (lower triangle)
 */
template <typename TDomain, typename TAlgebra>
template <typename TElem>
void NedelecProject<TDomain, TAlgebra>::integrate_div_DVF_elem_type
(
const TDomain & domain, ///< [in] the domain
const DoFDistribution & vertDD, ///< [in] the vertex DD
const aa_vert_cond_type & vert_base_cond, ///< [in] indices of the base conductors for every vertex
DenseMatrix<VariableArray2<number> > & C ///< [out] the capacity matrix to update
)
{
typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;
typedef typename DoFDistribution::traits<TElem>::const_iterator iterator;
// Get the positions of the grid points:
const typename TDomain::position_accessor_type & aaPos = domain.position_accessor ();
// Get the conductor distribution and the positions of the grid points:
const std::vector<int> & cond_index = m_spEmMaterial->base_conductor_index ();
// Array for the indices in the vectors:
std::vector<size_t> vVertInd (ref_elem_type::numCorners);
// Loop over all subsets representing insulators:
for (int si = 0; si < vertDD.num_subsets (); si++)
if (cond_index [si] == -2) // we loop over INSULATORS! (>= -1 are conductors)
{
// Loop over all the elements of the given type in the subset
iterator e_end = vertDD.template end<TElem> (si);
for (iterator elem_iter = vertDD.template begin<TElem> (si);
elem_iter != e_end; ++elem_iter)
{
TElem * pElem = *elem_iter;
size_t corner_cond [ref_elem_type::numCorners];
bool cond_bnd_flag;
// get the indices in the vectors:
// (cond_bnd_flag is raised iff at least one corner lies on the boundary
// of a non-grounded base conductor, i.e. has a mark >= 0)
cond_bnd_flag = false;
for (size_t co = 0; co < (size_t) ref_elem_type::numCorners; co++)
if ((corner_cond [co] = vert_base_cond [pElem->vertex (co)]) >= 0)
cond_bnd_flag = true;
if (! cond_bnd_flag) continue; // not at a boundary of a non-grounded conductor
// get the corner positions:
position_type aCorners [ref_elem_type::numCorners];
for (size_t co = 0; co < (size_t) ref_elem_type::numCorners; co++)
aCorners [co] = aaPos [pElem->vertex (co)];
// assemble the local matrix
number loc_A [ref_elem_type::numCorners] [ref_elem_type::numCorners];
LocLaplaceA<TElem>::stiffness (pElem, aCorners, loc_A);
// add the contributions to the capacity matrix
// (the check ensures exactly one DoF per corner, i.e. a P1 distribution)
if (vertDD.algebra_indices (pElem, vVertInd) != (size_t) ref_elem_type::numCorners)
UG_THROW ("NedelecProject: Vertex DoF distribution mismatch. Not the Lagrange-Order-1 element?");
// Loop over the potentials phi(j) of all the base conductors:
for (int to_cond = 0; to_cond < (int) m_DVF_phi.size (); to_cond++)
{
// In this computation, C(i,j) is the charge induced by phi(j)
// in conductor i. The conductor i, where the charge is induced,
// is called 'the from-conductor', and the conductor j, whose
// potential induces the charge, is called 'the to-conductor'.
pot_vector_type * phi = m_DVF_phi [to_cond];
if (phi == NULL) continue; // this is a grounded conductor
for (size_t to_co = 0; to_co < (size_t) ref_elem_type::numCorners; to_co++)
{
int from_cond;
// value of phi(to_cond) at the 'to' corner of the element:
number phi_val = (* phi) [vVertInd [to_co]];
for (size_t from_co = 0; from_co < (size_t) ref_elem_type::numCorners; from_co++)
// Exclude inner vertices of insulators and grounded conductors, as well as the upper triangle
// (from_cond >= to_cond >= 0 restricts the update to the lower triangle of C)
if ((from_cond = corner_cond [from_co]) >= to_cond)
C (from_cond, to_cond) += loc_A [from_co] [to_co] * phi_val;
}
}
}
}
}
/*----- Assembling the auxiliary Poisson problems 1: class AuxLaplaceLocAss -----*/
/**
* Computes the local discretization of the Laplace operator
*/
/**
 * Computes the local discretization of the Laplace operator: the local
 * stiffness matrix of the P1 (Lagrange-1) Laplacian with the simplest
 * (one-point) Gauss quadrature. The matrix is symmetric, so every pair
 * of corners is evaluated only once and written to both triangles.
 */
template <typename TDomain, typename TAlgebra>
template <typename TElem>
void NedelecProject<TDomain, TAlgebra>::LocLaplaceA<TElem>::stiffness
(
	GridObject * elem, ///< [in] element to prepare
	const position_type vCornerCoords [], ///< [in] coordinates of the corners of the element
	number loc_A [numCorners] [numCorners] ///< [out] the local stiffness matrix
)
{
	typedef FEGeometry<TElem, WDim, LagrangeLSFS<ref_elem_type, 1>, GaussQuadrature<ref_elem_type, 1> > TFEGeom;

	// request and update the finite element geometry
	TFEGeom & geo = GeomProvider<TFEGeom>::get ();
	// we assume that this is the simplest discretization:
	UG_ASSERT (geo.num_ip () == 1, "Only the simplest quadrature is supported here.");
	geo.update (elem, vCornerCoords);

	// fill the matrix; symmetry allows visiting each (i, j) pair once
	const number q_weight = geo.weight (0);
	for (size_t i = 0; i < (size_t) numCorners; i++)
		for (size_t j = i; j < (size_t) numCorners; j++)
		{
			const number entry
				= VecDot (geo.global_grad (0, i), geo.global_grad (0, j)) * q_weight;
			loc_A [i] [j] = entry;
			loc_A [j] [i] = entry;
		}
}
/**
 * Class constructor that gets the master NedelecProject object and extracts
 * the data necessary for the base class:
 *
 * The element discretization is created for the scalar function 0 of the
 * vertex-centered approximation space of the master, on all the subsets of
 * the master's electromagnetics material data. Assembling is enabled per
 * subset in prepare_element_loop (insulators only), so m_do_assemble_here
 * starts as 'false'.
 */
template <typename TDomain, typename TAlgebra>
NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::AuxLaplaceLocAss
(
NedelecProject & master ///< [in] the master NedelecProject object
)
: IElemDisc<TDomain> (master.m_spVertApproxSpace->name(0).c_str(), master.m_spEmMaterial->subset_names()),
m_master (master), m_do_assemble_here (false)
{
// register assemble functions
register_all_loc_discr_funcs ();
}
// check the basis and the grid
/**
 * Verifies the discretization setting for the auxiliary Poisson problems:
 * only regular grids (no hanging nodes) and exactly one scalar Lagrange-1
 * grid function for the potential are supported.
 *
 * \param vLfeID      local finite element space ids (must contain one Lagrange-1 id)
 * \param bNonRegular whether the assembling should support non-regular grids
 * \throws UGError if the setting is not supported
 */
template<typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::prepare_setting
(
	const std::vector<LFEID> & vLfeID,
	bool bNonRegular
)
{
	// hanging nodes would require constraints that this assembler does not implement
	if (bNonRegular)
		UG_THROW ("NedelecProject:"
			" The discretization of the auxiliary systems does not support hanging nodes.\n");

	// the potential is one scalar function
	if (vLfeID.size () != 1)
		UG_THROW ("NedelecProject:"
			" Only scalar grid functions are supported for the potential");

	// the vertex-centered discretization assumes the Lagrange-1 basis
	// (note: fixed the former typo "Largange" in the error message)
	if (vLfeID[0].type() != LFEID::LAGRANGE || vLfeID[0].order() != 1)
		UG_THROW ("NedelecProject:"
			" Only Lagrange-1 functions are supported for the potential");
}
// register the local assembler functions for all the elements and dimensions
/**
 * Registers the local assembling functions for every grid element type
 * of the dimensionality of the domain.
 */
template<typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::register_all_loc_discr_funcs ()
{
// get all grid element types in this dimension and below
typedef typename domain_traits<WDim>::DimElemList ElemList;
// switch assemble functions
// (the RegisterLocalDiscr functor presumably invokes register_loc_discr_func<TElem>
// for every type in ElemList — cf. the declaration of RegisterLocalDiscr)
boost::mpl::for_each<ElemList> (RegisterLocalDiscr (this));
}
// register the local assembler functions for a given element
/**
 * Registers the local assembling callbacks of this class for one concrete
 * element type: element loop preparation/finalization, the stiffness (A)
 * and mass (M) parts of the Jacobian and the defect, and the right-hand side.
 */
template<typename TDomain, typename TAlgebra>
template<typename TElem> // the element to register for
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::register_loc_discr_func ()
{
ReferenceObjectID id = geometry_traits<TElem>::REFERENCE_OBJECT_ID;
// drop any previously registered callbacks for this reference object first
this->clear_add_fct(id);
// element loop preparation and finalization
this->set_prep_elem_loop_fct(id, & AuxLaplaceLocAss::template prepare_element_loop<TElem>);
this->set_prep_elem_fct (id, & AuxLaplaceLocAss::template prepare_element<TElem>);
this->set_fsh_elem_loop_fct (id, & AuxLaplaceLocAss::template finish_element_loop<TElem>);
// Jacobian (stiffness and mass parts)
this->set_add_jac_A_elem_fct(id, & AuxLaplaceLocAss::template ass_JA_elem<TElem>);
this->set_add_jac_M_elem_fct(id, & AuxLaplaceLocAss::template ass_JM_elem<TElem>);
// defect (stiffness and mass parts)
this->set_add_def_A_elem_fct(id, & AuxLaplaceLocAss::template ass_dA_elem<TElem>);
this->set_add_def_M_elem_fct(id, & AuxLaplaceLocAss::template ass_dM_elem<TElem>);
// right-hand side
this->set_add_rhs_elem_fct (id, & AuxLaplaceLocAss::template ass_rhs_elem<TElem>);
}
// prepares the loop over the elements: check if the subdomain is an insulator
/**
 * Prepares the loop over the elements of one subset: the auxiliary Laplace
 * equations are assembled in insulators only, so the assembling flag is set
 * accordingly (base_conductor_index (si) == -1 identifies insulators here).
 */
template<typename TDomain, typename TAlgebra>
template<typename TElem>
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::prepare_element_loop
(
	ReferenceObjectID roid, // [in] only elements with this roid are looped over
	int si // [in] and only in this subdomain
)
{
	// We assemble in insulators only
	m_do_assemble_here
		= (m_master.m_spEmMaterial->base_conductor_index (si) == -1);
}
/// transfer the local stiffness matrix to the global discretization
/**
 * Adds the local stiffness matrix of the Laplacian to the global Jacobian.
 * Does nothing outside of the subsets marked for assembling (cf.
 * prepare_element_loop).
 */
template<typename TDomain, typename TAlgebra>
template<typename TElem>
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceLocAss::ass_JA_elem
(
	LocalMatrix & J, ///< [in, out] the matrix to update
	const LocalVector & u, ///< [in] the local solution (not used here)
	GridObject * elem, ///< [in] element to prepare
	const position_type vCornerCoords [] ///< [in] coordinates of the corners of the element
)
{
	typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

	// skip subsets where nothing should be assembled (non-insulators)
	if (! m_do_assemble_here)
		return;

	// compute the local stiffness matrix of the Laplacian
	number loc_A [ref_elem_type::numCorners] [ref_elem_type::numCorners];
	NedelecProject<TDomain, TAlgebra>::template LocLaplaceA<TElem>::stiffness (elem, vCornerCoords, loc_A);

	// accumulate it into the global matrix
	const size_t n_co = (size_t) ref_elem_type::numCorners;
	for (size_t i = 0; i < n_co; i++)
		for (size_t j = 0; j < n_co; j++)
			J (_C_, i, _C_, j) += loc_A [i] [j];
}
/*----- Assembling the auxiliary Poisson problems 2: class AuxLaplaceRHS -----*/
/**
 * Class constructor:
 * Stores the reference to the master NedelecProject object and initializes
 * the base-conductor index with -2 (presumably "no conductor selected yet";
 * elsewhere in this file -2 marks insulators and indices >= 0 denote base
 * conductors — confirm against the users of m_base_cond).
 */
template <typename TDomain, typename TAlgebra>
NedelecProject<TDomain, TAlgebra>::AuxLaplaceRHS::AuxLaplaceRHS
(
NedelecProject & master ///< [in] the master NedelecProject object
)
: m_master (master), m_base_cond (-2)
{}
/**
* Sets zero values on the whole Dirichlet boundary.
*/
template <typename TDomain, typename TAlgebra>
void NedelecProject<TDomain, TAlgebra>::AuxLaplaceRHS::set_zero_Dirichlet
(
pot_vector_type & u, ///< the vector where to set zeros
const DoFDistribution * dd ///< the vert.-based DoF distribution
)
{
if (m_master.m_spDirichlet.invalid ())
return; // no Dirichlet boundaries specified
std::vector<size_t> vVertInd (1);
SubsetGroup dirichlet_ssgrp (m_master.m_spEmMaterial->subset_handler ());
m_master.m_spDirichlet->get_dirichlet_subsets (dirichlet_ssgrp);
// Loop over the Dirichlet subsets
for (size_t j = 0; j < dirichlet_ssgrp.size (); j++)
{
int si = dirichlet_ssgrp [j];
// Loop the edges
t_edge_iterator iterEnd = dd->end<Edge> (si);