!
! ParaGauss, a program package for high-performance computations of
! molecular systems
!
! Copyright (C) 2014 T. Belling, T. Grauschopf, S. Krüger,
! F. Nörtemann, M. Staufer, M. Mayer, V. A. Nasluzov, U. Birkenheuer,
! A. Hu, A. V. Matveev, A. V. Shor, M. S. K. Fuchs-Rohr, K. M. Neyman,
! D. I. Ganyushin, T. Kerdcharoen, A. Woiterski, A. B. Gordienko,
! S. Majumder, M. H. i Rotllant, R. Ramakrishnan, G. Dixit,
! A. Nikodem, T. Soini, M. Roderus, N. Rösch
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License version 2 as
! published by the Free Software Foundation [1].
!
! This program is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
! General Public License for more details.
!
! [1] http://www.gnu.org/licenses/gpl-2.0.html
!
! Please see the accompanying LICENSE file for further information.
!
!=====================================================================
! Public interface of module
!=====================================================================
subroutine main_gradient(loop)
!-------------------------------------------------------------------
!
! Purpose: This is the main routine of the integral part
! for calculation of energy gradients
! executed by the master.
!
! Subroutine called by: main_master
!
! References: Publisher Document: Concepts of Integral Part
!
!
! Author: MS
! Date: 12/96
!
!-------------------------------------------------------------------
! Modifications
!-------------------------------------------------------------------
!
! Modification (Please copy before editing)
! Author: Uwe Birkenheuer
! Date: June 1998
! Description: Moving_Unique_Atom concept introduced
! Split_Gradients concept introduced
! Gradients for Model_Density_Approach introduced
!
!===================================================================
! End of public interface of module
!===================================================================
!..............................................................................
!
! Individual force contributions
! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! F_grid     = Sum(a) [ d/dR w_a ] e_xc[rho_up,rho_dn](r_a)
!            + Sum(a) w_a [ d/dR r_a ] Nabla e_xc[rho_up,rho_dn](r_a)
!
! F_grid_mda = Sum(a) [ d/dR w_a ] e_xc[rhofit_up,rhofit_dn](r_a)
!            + Sum(a) w_a [ d/dR r_a ] Nabla e_xc[rhofit_up,rhofit_dn](r_a)
!
! F_ob_num = Sum(s) < d/dR rho_s | V_xc,s[rho_up,rho_dn] >_num
!
! F_rho_num = Sum(s) < d/dR rho_s | V_xc,s[rhofit_up,rhofit_dn] >_num
!
! F_ch_fit = - [ rhofit | d/dR rhofit ]
!
! F_rho_fit = - Sum(s) < d/dR rhofit_s | V_X,s >
!
! F_vxc_fit = - Sum(s) < rhofit_s | d/dR V_X,s >
!
! F_ob_3c = Sum(n,s) < d/dR psi_n,s | T+V_nuc+V_H - eps_n,s | psi_n,s >
!
! F_ob_mda = Sum(n,s) < d/dR psi_n,s | H_KS,s - eps_n,s | psi_n,s >
!
! F_ch_3c = [ rho | d/dR rho_fit ]
!
! F_vxc_3c = Sum(s) < rho_s | d/dR V_X,s >
!
! F_hf_3c = < rho_tot | d/dR V_nuc >
!
! F_nn = d/dR E_nn
!
! Physical force contributions
! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! 1. Hellmann-Feynman force : F_hf = F_hf_3c + F_nn
! 2. Orbital Pulay correction : F_ob = F_ob_3c + F_ob_num [non-MDA]
! = F_ob_mda [ MDA]
! 3. Charge fit Pulay correction : F_ch = F_ch_3c + F_ch_fit
! 4. XCMDA correction due to d/dR V_X,s : F_vxc = F_vxc_3c + F_vxc_fit
! 5. XCMDA correction due to d/dR rho_s : F_rho = F_rho_num + F_rho_fit
! 6. Numerical grid correction : F_num = F_grid [non-MDA]
! F_grid_mda [ MDA]
!
! Storage
! ~~~~~~~
!
! total_gradients split_gradients type
! ----------------------------------------------------------------------------
! F_grid -grad_xc -grad_grid non-MDA
! F_grid_mda -grad_xc -grad_grid MDA
! F_ob_num -grad_xc -grad_xc non-MDA
! F_rho_num -grad_xc -grad_xc MDA
! F_ch_fit -grad_fit_ch_cartesian -grad_fit_ch_cartesian both
! F_rho_fit -grad_fit_ch_cartesian -grad_mda_rhofit_cartesian MDA
! F_vxc_fit -grad_fit_ch_cartesian -grad_mda_xcpot_cartesian MDA
! F_ob_3c gradient_totalsym gradient_ob_pulay non-MDA
! F_ob_mda gradient_totalsym gradient_ob_pulay MDA
! F_ch_3c gradient_totalsym gradient_ch_pulay both
! F_vxc_3c gradient_totalsym gradient_mda_vxc MDA
! F_hf_3c gradient_totalsym gradient_totalsym both
! F_nn gradient_totalsym gradient_totalsym both
!
! F_hf gradient_totalsym gradient_totalsym both
! F_ob gradient_totalsym gradient_ob_pulay both
! F_ch gradient_totalsym gradient_ch_pulay both
! F_vxc gradient_totalsym gradient_mda_vxc MDA
! F_rho gradient_totalsym gradient_mda_rho MDA
! F_num -grad_xc -grad_grid both
!..............................................................................
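!
! Illustration (comment only): schematically, the total force is
! assembled from the physical contributions 1.-6. above as
!
!   F_total = F_hf + F_ob + F_ch + F_num                   [non-MDA]
!   F_total = F_hf + F_ob + F_ch + F_vxc + F_rho + F_num   [    MDA]
!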
!-------------------------------------------------------------------
! Modifications
!-------------------------------------------------------------------
!
! Modification (Please copy before editing)
! Author: A.Hu
! Date: 4/99
! Description: added pseudopotentials contributions to gradients
!
!-------------------------------------------------------------------
!-------------------------------------------------------------------
! Modifications
!-------------------------------------------------------------------
!
! Modification (Please copy before editing)
! Author: AS
! Date: 6/97
! Description: pvm -> comm
!
! Modification (Please copy before editing)
! Author: ...
! Date: ...
! Description: ...
!
!-------------------------------------------------------------------
!------------ Modules used -----------------------------------------
! define FPP_TIMERS 2
# include "def.h"
use type_module ! type specification parameters
use efield_module, only: efield_gradient, efield_applied
use efield_module, only: efield_intensity, efield_gradient_store
use efield_module, only: efield_send_recv
#ifdef _COMPAC_FORTRAN
use datatype
#endif
use output_module ! defines amount of output
use iounitadmin_module ! to open output units
! comm related information and routines
use comm, only: comm_rank, comm_size, comm_reduce
use integralpar_module ! steering information for integral part
use operations_module, only: operations_geo_opt,operations_post_scf, &
operations_core_density,operations_solvation_effect, &
#ifdef WITH_MOLMECH
operations_qm_mm_new, &
#endif
operations_qm_mm, operations_integral
use gradient_data_module, only: gradient_totalsym &
, gradient_cartesian &
, gradient_ob_pulay &
, gradient_ch_pulay &
, gradient_mda_vxc &
, gradient_mda_rho &
, gradient_index &
, grad_fit_ch_cartesian &
, grad_mda_rhofit_cartesian &
, grad_mda_xcpot_cartesian &
, dervs_totalsym &
, dervs_cartesian &
, cpks_gradient_totalsym &
, cpks_grad_fit_totasym &
, cpks_grad_fit_ch_cartesian &
, partial_cartesian &
, gradient_data_setup &
, gradient_data_write_cartesians &
, gradient_data_write_cart_hess &
, gradient_sndrcv_fit_ch &
, gradient_sndrcv_3c &
, send_receive_q_grads &
, gradient_data_write_gxfile &
, gradient_data_shutdown &
, cpks_add_fitmat_ch_grads
#ifdef WITH_EPE
use gradient_data_module, only: calc_cluster_epe_energy, cluster_epe_energy
#endif
#ifdef WITH_MOLMECH
use gradient_data_module, only: qm_grads_to_qmmm
#endif
use occupied_levels_module
use xc_cntrl, only: xc_is_on=>is_on, xc_ANY
use post_scf_module, only: post_scf_deallocate_grad_xc,grad_xc,grad_grid, &
dervs_xc
use grid_module, only: weight_grads
use fit_coeff_module, only: fit_coeff_sndrcv, ICHFIT, IXCFIT, coeff_charge
use options_module, only : options_relativistic, options_split_gradients, &
options_xcmode, xcmode_model_density &
, options_debug_key
use unique_atom_module, only: unique_atoms, N_unique_atoms, &
N_moving_unique_atoms, moving_unique_atom_index, &
unique_atom_grad_info
use pointcharge_module, only: moving_pc, print_pc_grad, &
pc_grad_cart_write, transform_pc_grad_to_cart
#ifdef WITH_EPE
use ewaldpc_module, only: cluster_nuc_epe_en, epe_relaxation
#endif
use point_dqo_module, only: dealloc_dqo,calc_nuc_dqo_grads,dealloc_center_inform,moving_X_centers, &
calc_X_grads,print_X_grad,transform_x_grad_to_cart,x_grad_cart_write
use point_dqo_module, only: moving_R_centers,print_R_grad
use induced_dipoles_module, only: moving_Pol_centers, print_id_grad
use calc_id_module, only: calc_nuc_id_grads,calc_id_grads
use calc_id_module, only: transform_id_grad_to_cart,id_grad_cart_write
#ifdef WITH_EFP
use efp_rep_module, only: dealloc_repf,dealloc_repf_inform,transform_repf_grad_to_cart, &
repf_grad_cart_write
use efp_efp_module, only: efp_efp_gradients
use efp_module, only: efp, n_efp, efp_sum_up_grads, efp_grad_cart_write, efp_write_gxfile
use efp_module, only: qm_fixed
use efp_only_opt_module, only: efp_opt_main
use energy_calc_module, only: get_energy
use efp_solv_grad_module, only: transform_X_solv_grad_to_cart, X_solv_grad_cart_write
#endif
use energy_calc_module, only: set_energy
#ifdef WITH_BGY3D
use bgy3d, only: rism_term
#endif
use solv_cavity_module
use solv_electrostat_module
use solv_2nd_deriv_module, only: nuc_solv_2nd_deriv,charge_solv_2nd_deriv,matrix_2nd_deriv !!!!!!!!!AS
#ifdef WITH_MOLMECH
use qmmm_interface_module !!!!!!!!!!!!!AS
use qmmm1_interface_module !!!!!!!!!!!!AS
#endif
use datatype
use calc3c_switches
use cpksdervs_matrices, only: cpksalloc
#ifdef WITH_SECDER
use cpks_utils, only: cpks_bcast_hr1 &
, cpks_free_hr1
#endif
#ifdef WITH_DFTPU
use dft_plus_u_module, only: dft_plus_u_grad_init &
, dft_plus_u_grad_finalize &
, dft_plus_u_mo_grad_init
#endif
use density_data_module, only: gendensmat_occ, density_data_free
#ifdef WITH_ERI4C
use density_data_module, only: densmat
#endif
#ifdef _DPRINT
use error_module, only: MyID
#endif
use interfaces, only: main_integral, integral_trafo, RELGRAD, RELSDER
use interfaces, only: potential_calculate
use interfaces, only: grad_solv_calculate
#ifdef WITH_EXPERIMENTAL
use symmetry_data_module, only: irrep_dimensions
use cpks_common, only: cpks_alloc_p1w1, cpks_free_p1w1, cpks_p1, cpks_w1
use cpks_utils, only: cpks_calc_p1w1
#endif
#if WITH_ERI4C == 1
use eri4c_options, only: J_exact, K_exact
#endif
implicit none
integer (i4_kind), intent (in) :: loop ! used only on master so far
! *** end of interface ***
integer :: rank
logical :: model_density, split_gradients
integer (kind=i4_kind) :: i, n_equal_max
integer(i4_kind) :: IFIT
integer(i4_kind) :: IREL
logical :: tty ! used in say() subprogram
logical :: xc ! indicates if XC is required at all
integer, parameter :: grads=1, deriv=2
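! The grads/deriv selectors distinguish the first- and second-derivative
! traffic in the two gradient_sndrcv_3c() calls below.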
#ifdef WITH_EFP
real (r8_kind) :: energy
#endif
FPP_TIMER_DECL(tot)
FPP_TIMER_DECL(int1)
FPP_TIMER_DECL(rel)
FPP_TIMER_DECL(cpks)
FPP_TIMER_DECL(int2)
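! The FPP timers declared above instrument the major phases (total,
! both integral runs, relativistic trafos, CPKS); their values are
! reported at the end of this subroutine.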
!-------------------------------------------------------------------
!------------ Executable code -----------------------------------
! ================ CONTEXT: ALL PROCESSORS ===============
FPP_TIMER_START(tot)
DPRINT MyID,"main_gradient: start"
rank = comm_rank ()
! Tty is used in the subprogram say() to determine whether the current
! processor should print the given output:
tty = output_main_gradient .and. rank == 0
! indicates if XC is required at all:
xc = xc_is_on(xc_ANY) .and. operations_post_scf
#ifdef WITH_EFP
if(efp .and. n_efp > 1 .and. qm_fixed) then
DPRINT MyID,"main_gradient: starting EFP optimization"
call efp_opt_main()
DPRINT MyID,"main_gradient: finishing EFP optimization"
return
end if
#endif
! ================ CONTEXT: ALL PROCESSORS ===============
split_gradients = options_split_gradients()
model_density = options_xcmode() == xcmode_model_density
call say ("start")
DPRINT MyID,"main_gradient: call gradient_data_setup() "
call say ("gradient_data_setup")
!
! See matching gradient_data_shutdown() below:
!
call gradient_data_setup()
DPRINT MyID,"done"
! ================ CONTEXT: MASTER ONLY ==================
master1: if (rank == 0) then
solv_eff: if(operations_solvation_effect) then
! Calculation of the additional contributions to the total
! molecular gradients due to the solvent (nonelectronic)
! effect
do_gradients=.true.
if(disp_rep_energy) then
do_cavitation=.false.
do_disp_rep=.true.
call disp_rep_wrap()
if(output_solv_grads) then
call transform_to_cart_grads(grad_solv_cart,grad_solv_totsym)
call gradient_data_write_cartesians( &
' Cartesian Solvation Gradients - Disp - Rep term:',grad_solv_cart)
call add_solv_grads()
grad_solv_totsym=0.0_r8_kind
do i=1,N_moving_unique_atoms
grad_solv_cart(i)%m=0.0_r8_kind
enddo
endif
endif
if(cavitation_energy) then
do_cavitation=.true.
do_disp_rep=.false.
call points_on_cavity_surface()
! This sets energy_cav in solv_cavity_module, among other
! things. If either operations_solvation_effect or
! cavitation_energy is not set, that energy remains
! uninitialized:
call energy_and_grad_of_cavity()
if(output_solv_grads) then
call transform_to_cart_grads(grad_solv_cart,grad_solv_totsym)
call gradient_data_write_cartesians( &
' Cartesian Solvation Gradients - Cavitation term:',grad_solv_cart)
call add_solv_grads()
grad_solv_totsym=0.0_r8_kind
do i=1,N_moving_unique_atoms
grad_solv_cart(i)%m=0.0_r8_kind
enddo
endif
endif
call say ("solvation setup")
do_cavitation=.false.
do_disp_rep=.false.
call points_on_cavity_surface
call say ("solvation setup done")
endif solv_eff
#ifdef WITH_EFP
if(efp .and. n_efp > 0) call efp_efp_gradients()
#endif
endif master1
! ================ CONTEXT: ALL PROCESSORS ===============
if(.not. operations_integral) goto 1000 ! not to calculate QM gradients
! Here fit_coeff_sndrcv() handles the serial case gracefully. MDA:
! the updated coeff_xcmda must be sent to each slave. Else:
! coeff_charge has not yet been sent to the slaves.
call say ("fit_coeff_sndrcv")
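! IFIT selects which fit coefficients to distribute: the charge fit
! always, and additionally the XC fit for the model density approach: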
IFIT = ICHFIT
if (model_density) IFIT = IFIT + IXCFIT
call fit_coeff_sndrcv (IFIT)
if (options_relativistic) then
call integralpar_set ('RelGrads')
else
call integralpar_set ('Gradients')
end if
! Initial steps before computing the gradients in the presence of an
! electric field. Passing the value of the electric field strength to
! master and slaves.
call efield_send_recv()
! Computation of 2e integral derivatives with the ERI4C library.
! TODO: avoid generating the density matrix again.
#if WITH_ERI4C == 1
IF (K_exact .or. J_exact) THEN
call gendensmat_occ()
!
call exact_2e_grads( densmat, gradient_totalsym )
! TODO: if DFT+U is enabled, densmat is generated again by init
call density_data_free()
ENDIF
#endif
#ifdef WITH_DFTPU
!
! Initial steps before computing DFT+U gradients:
!
call dft_plus_u_grad_init()
call dft_plus_u_mo_grad_init()
#endif
! Done by integralpar_set(...) above: integralpar_cpks_contribs =
! .false., i.e. explicit functional dervs only.
call say ("main_integral(1)")
FPP_TIMER_START(int1)
call main_integral () ! (1) First call: gradients, maybe second derivatives
FPP_TIMER_STOP(int1)
DPRINT MyID,'main_gradient: calc_3c=',FPP_TIMER_VALUE(t_calc_3center)
DPRINT MyID,'main_gradient: int1=',FPP_TIMER_VALUE(int1)
#ifdef WITH_DFTPU
!
! Final steps after computing DFT+U gradients: deallocate internal
! structures and 'densmat'.
!
call dft_plus_u_grad_finalize()
#endif
if (integralpar_2dervs .and. rank == 0) then
! === context: master only ===
! The orbital gradient_totalsym contribs have been calculated; they
! are to be summed up with the fit_charge contribs and transformed
! to cartesians.
call secder_add_ch_grads() ! to calculate cpks_gradient_totalsym and cpks_grad_fit_totasym, see contains
endif
call say ("gradient_sndrcv_fit_ch")
! GET grad_fit_ch_cartesian : < rhofit | d/dR V_H[rhofit] >
! GET grad_mda_rhofit_cartesian : < d/dR rhofit | V_X[rhofit] >
! GET grad_mda_xcpot_cartesian : < rhofit | d/dR V_X[rhofit] >
call gradient_sndrcv_fit_ch() ! msgtag_grad_ch
! ================ CONTEXT: MASTER ONLY ==================
master2: if (rank == 0) then
call say ("add_fit_ch_grads")
sum_fit_ch_grads: if (split_gradients) then
call add_fit_ch_grads(gradient_ch_pulay,grad_fit_ch_cartesian)
if (model_density) then
call add_fit_ch_grads(gradient_mda_rho,grad_mda_rhofit_cartesian)
call add_fit_ch_grads(gradient_mda_vxc,grad_mda_xcpot_cartesian)
endif
else ! i.e. .not.split_gradients
#define IMPL_CONTRIBS_TO_DERVS
#ifdef IMPL_CONTRIBS_TO_DERVS
call add_fit_ch_grads(gradient_totalsym,grad_fit_ch_cartesian) !fit(1)
#endif
! totalsym func contrib, for coeff grad part see below
! rho contrib only if commented
endif sum_fit_ch_grads
call say ("core_gradient_calc")
call core_gradient_calc() !GET d/dR E_nn
DCALL print_totsym_grads('GRAD: gradient_totalsym fin orbital and fit contribs')
endif master2
! ================ CONTEXT: ALL PROCESSORS ===============
![[=============== Relativistic Transformations: =========
if(options_relativistic) then
IREL = RELGRAD
if(integralpar_2dervs) then
IREL = IREL + RELSDER
endif
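! IREL selects the relativistic transformations: gradients always,
! second derivatives additionally in a second-derivative run.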
DCALL print_totsym_grads('GRAD: gradient_totalsym before integral_trafo')
call say ("start rel trafos")
FPP_TIMER_START(rel)
call integral_trafo(IREL)
FPP_TIMER_STOP(rel)
DPRINT MyID,'main_gradient: rel=',FPP_TIMER_VALUE(rel)
DCALL print_totsym_grads('GRAD: gradient_totalsym after integral_trafo')
endif
!]]=======================================================
1000 continue ! .not. operations_integral
if(operations_solvation_effect) then
call say ("grad_solv_calculate ")
call grad_solv_calculate('SolvGrads')
if(integralpar_2dervs) then
call say ("grad_solv_calculate - Q deriv ")
call grad_solv_calculate('Q_SolvGrads')
end if
if (rank == 0) then
if(VTN) then
call say ("solvation nuc_grad_vtn")
call nuc_grad_vtn(gradient_index)
else
call say ("solvation nuc_grad")
call nuc_grad(gradient_index,integralpar_2dervs)
end if
end if
call say ("solvation matrix_grad")
if(VTN) then
call matrix_grad_vtn(gradient_index)
else
call matrix_grad(gradient_index)
end if
if(integralpar_2dervs) then
if (rank == 0) then
call say ("nuc_solv_2nd_deriv ")
call nuc_solv_2nd_deriv()
end if
end if
endif
if(.not. operations_integral) goto 1001 ! not to calculate QM gradients
call say ("gradient_sndrcv_3c")
! GET gradient_ob_pulay : Sum(n) < d/dR psi_n | H_KS - eps_n | psi_n >
! without the numerical V_xc contribution
! GET gradient_ch_pulay : < rho | d/dR V_H[rhofit] >
! GET gradient_mda_vxc : < rho | d/dR V_X[rhofit] >
! GET gradient_totalsym : < rho | d/dR V_nuc >
call gradient_sndrcv_3c(grads)
if (operations_solvation_effect) then
if (integralpar_2dervs) then
#if 1 /* def IMPL_CONTRIBS_TO_DERVS */
if (rank == 0) then
call charge_solv_2nd_deriv()
end if
#endif
! FIXME: better yet make sure that send_receive_Q_grads()
! handles the serial case:
if (comm_size () > 1) then
call send_receive_Q_grads ()
endif
call matrix_2nd_deriv()
call potential_calculate ('SolvDervs')
end if
endif
call gradient_sndrcv_3c(deriv)
! ================ CONTEXT: MASTER ONLY ==================
master3: if (rank == 0) then
#ifdef IMPL_CONTRIBS_TO_DERVS
if( xc )then
call add_xc_grads(gradient_cartesian,grad_xc)
DCALL print_cart_grads('GRAD: gradient_cartesian only xc contribs')
endif
#endif
call say ("transform_to_cart_grads")
call transform_to_cart_grads(gradient_cartesian,gradient_totalsym)
! Below, xc gradients were previously added directly to
! gradient_cartesian. The gradient calc is now finished, but
! gradient_totalsym will be modified further in the cpks calcs.
DCALL print_cart_grads('GRAD: gradient_cartesian with xc contribs')
endif master3
! ================ CONTEXT: ALL PROCESSORS ===============
#ifdef WITH_SECDER
if(integralpar_cpksdervs) then
DPRINT 't_cpksdervs_quad_sums= ',FPP_TIMER_VALUE(t_dervs_quad_sums)
DPRINT MyID//'cpks_g4constructs_______________________________________________'
FPP_TIMER_START(cpks)
call cpks_g4constructs() ! -> parametrization matrix
FPP_TIMER_STOP(cpks)
DPRINT MyID,'main_gradient: cpks=',FPP_TIMER_VALUE(cpks)
! distribute saved rel-ham:
if(options_relativistic) then
! do it before second call to main_integral()
! in a sec-der run.
call cpks_bcast_hr1()
endif
#if 0
! Add fit contribution aX(k) * gY(k,l) * a(l) (moved from main_integral)
call impl_fit_dervs(dervs_totalsym)
#endif
#ifdef WITH_EXPERIMENTAL
! allocate cpks_p1/cpks_w1 matrices from cpks_common:
call cpks_alloc_p1w1(irrep_dimensions,n_spn=1,n_gra=size(cpks,1))
! cf. cpks_free_p1w1
! Compute (yet another) density matrix and the energy-weighted density matrix:
call cpks_calc_p1w1(1,p1=cpks_p1,w1=cpks_w1)
#endif
! FIXME: do the slaves have it allocated?
cpks_gradient_totalsym=0.0_r8_kind
! This term will be recalculated to be multiplied by the fitcoeff
! gradients; otherwise it should not be modified after the initial
! calculation.
! no need for RelGrads as they were saved before CPKS:
call integralpar_set('Gradients2') ! sets integralpar_cpks_contribs=.true.
call say ("main_integral(2)")
FPP_TIMER_START(int2)
call main_integral ()
FPP_TIMER_STOP(int2)
DPRINT MyID,'main_gradient: calc_3c=',FPP_TIMER_VALUE(t_calc_3center)
DPRINT MyID,'main_gradient: int2=',FPP_TIMER_VALUE(int2)
DPRINT MyID,'main_gradient: w1_sumup=',FPP_TIMER_VALUE(t_w1_sumup)
DPRINT MyID,'main_gradient: p1_sumup=',FPP_TIMER_VALUE(t_p1_sumup)
! no regular gradient_totalsym contribs are required for this call
#if 1
! Add fit contribution aX(k) * gY(k,l) * a(l) (moved from main_integral)
call impl_fit_dervs(dervs_totalsym)
DPRINT MyID//'impl_fit_dervs done'
#endif
if (operations_solvation_effect) then
call say ("grad_solv_calculate: cpks contribution")
call grad_solv_calculate('SolvGrads2')
call potential_calculate ('SolvDervs2')
! NOTE: potential_calculate also calls main_integral()
! that does solvation integrals
endif
#ifdef WITH_EXPERIMENTAL
! deallocate cpks_p1/cpks_w1 matrices from cpks_common:
call cpks_free_p1w1() ! cf. cpks_alloc_p1w1
#endif
! deallocate saved rel-ham:
if(options_relativistic) then
! do it after second call to main_integral(Gradients2)
! in a sec-der run.
call cpks_free_hr1()
endif
! Sum partial contributions to second derivatives on master.
! FIXME: should we do allreduce instead to minimize the
! differences between workers?
call comm_reduce (dervs_totalsym)
endif ! integralpar_cpksdervs
#endif
1001 continue ! .not. operations_integral !!!!!!!!!!!!!
! ================ CONTEXT: MASTER ONLY ==================
master4: if (rank == 0) then
QM2: if(operations_integral) then ! if QM gradients exist
!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#ifdef WITH_EPE
if(epe_relaxation.and.calc_cluster_epe_energy) then
DPRINT 'main_gradient: cluster_epe_energy',cluster_epe_energy
write(output_unit,*)'energy of interaction of the exact orbital electronic'
write(output_unit,*)'density with epe centers only,cluster_epe_energy'
write(output_unit,*)'main_gradient: cluster_epe_energy',cluster_epe_energy
write(output_unit,*)
write(output_unit,*)'energy of interaction of cluster and epe centers only'
write(output_unit,*)cluster_epe_energy- cluster_nuc_epe_en
write(output_unit,*)
end if
#endif
if(integralpar_2dervs) then
call transform_to_cart_dervs(dervs_cartesian,dervs_totalsym)
DPRINT MyID//'transform_to_cart_dervs done'
endif
! if(integralpar_cpksdervs) &
! call cpks_transform_to_cart_grads(cpks_gradient_cartesian,cpks_gradient_totalsym)
if(operations_solvation_effect) then
call transform_to_cart_grads(grad_solv_cart,grad_solv_totsym)
if(output_solv_grads) then
call gradient_data_write_cartesians( &
' Cartesian Solvation Gradients - Electrostatic term:',grad_solv_cart)
endif
call add_solv_grads()
DCALL print_cart_grads('GRAD: gradient_cartesian with solv contribs')
if(with_pc .and. .not.fixed_pc) call solv_forces_on_pc()
endif
splig_cart: if (split_gradients) then
! 1. Hellmann-Feynman contribution
call gradient_data_write_cartesians( &
' 1. The Hellmann-Feynman force contributions', &
gradient_cartesian)
! 2. orbital Pulay correction
call transform_to_cart_grads(partial_cartesian,gradient_ob_pulay)
if (.not.model_density) then
! GET grad_xc : - < d/dR rho | Vxc[rho] >_num
if(xc) call add_xc_grads(partial_cartesian,grad_xc)
endif
call gradient_data_write_cartesians( &
' 2. The orbital Pulay correction to the forces', &
partial_cartesian)
call sum_up_and_reset_part_grads()
! 3. charge fit Pulay correction
call transform_to_cart_grads(partial_cartesian,gradient_ch_pulay)
call gradient_data_write_cartesians( &
' 3. The charge fit Pulay correction to the forces', &
partial_cartesian)
call sum_up_and_reset_part_grads()
if (weight_grads) then
! 4. numerical grid correction
! GET grad_grid : - Sum(a) (d/dR w_a) exc[rho](r_a)
! - Sum(a) w_a (d/dR r_a) Nabla exc[rho](r_a)
if(xc) call add_xc_grads(partial_cartesian,grad_grid)
call gradient_data_write_cartesians( &
' 4. The numerical grid correction to the forces', &
partial_cartesian)
call sum_up_and_reset_part_grads()
endif
if (model_density) then
! 5. XCMDA correction : d/dR V_X[rhofit]
call transform_to_cart_grads(partial_cartesian,gradient_mda_vxc)
call gradient_data_write_cartesians( &
' 5. The XC force corrections due to d/dR V_xc[rhofit]', &
partial_cartesian)
call sum_up_and_reset_part_grads()
! 6. XCMDA correction : d/dR rhofit
call transform_to_cart_grads(partial_cartesian,gradient_mda_rho)
! GET grad_xc : - < d/dR rhofit | Vxc[rhofit] >_num
if(xc) call add_xc_grads(partial_cartesian,grad_xc)
call gradient_data_write_cartesians( &
' 6. The XC force corrections due to d/dR rhofit', &
partial_cartesian)
call sum_up_and_reset_part_grads()
endif
else splig_cart
call say ("add_xc_grads")
! GET grad_xc : - < d/dR rho | Vxc[rho] >_num
! GET grad_grid : - Sum(a) (d/dR w_a) exc[rho](r_a)
! - Sum(a) w_a (d/dR r_a) Nabla exc[rho](r_a)
! call add_xc_grads(gradient_cartesian,grad_xc)
! grad_xc contribs are added directly to the other cartesian contribs,
! while the other contribs are transformed from the totally symmetric
! contribs
#ifdef WITH_SECDER
if(integralpar_2dervs) then
n_equal_max=maxval(unique_atoms(:)%n_equal_atoms)
if( xc )then
call average_xc_dervs(dervs_xc)
#if 0
print *,'XC hessian cartesian'
call gradient_data_write_cart_hess(stdout_unit,dervs_xc)
#endif
call add_xc_dervs(dervs_cartesian,dervs_xc)
DPRINT MyID//'add_xc_dervs done'
endif
call store_dervs_cart(dervs_cartesian)
call gradient_data_write_cart_hess(stdout_unit,dervs_cartesian)
call gradient_data_write_cart_hess(output_unit,dervs_cartesian)
DPRINT 't_cpksdervs_quad_sums= ',FPP_TIMER_VALUE(t_dervs_quad_sums)
endif
#endif
endif splig_cart
! Total force due to the presence of an electric field (nuclear + electronic contributions):
if (efield_applied()) then
! I guess this adds the nuclear contribution to the gradient_cartesian:
call efield_gradient(gradient_cartesian)
endif
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
end if QM2
endif master4
! ================ CONTEXT: ALL PROCESSORS ===============
#ifdef WITH_BGY3D
!
! Compute and add MM contribution to the energy and forces --- here
! due to the RISM solvent environment. The MM term is essentially a
! function of geometry only. The corresponding energy term is
! computed here and added to the total energy before writing out the
! gxfile.
!
! FIXME: this energy term does not appear during SCF, so that the
! usual e_sum printed during the iterations is incomplete.
!
! The RISM code is called from the parallel context. By the current
! convention the values (additions actually) of energy and gradients
! are duplicated on all workers. As it happens, at this point in the
! program only the values on master matter for the written gxfile.
! The consistency is however verified in energy_calc_reduce() out of
! paranoia and for documentation purposes.
!
block
real (r8_kind) :: e_rism
if (rank == 0) then
DCALL print_cart_grads ("GRAD: before RISM")
endif
! The next call will increment it, just like it will increment
! gradients. Historically, the energy contributions are stored
! separately in private variables of the energy_calc_module.
e_rism = 0.0_r8_kind
call rism_term (unique_atoms, e_rism, gradient_cartesian)
call set_energy (rism=e_rism)
if (rank == 0) then
DCALL print_cart_grads ("GRAD: after RISM")
endif
end block
#else
! To stay consistent also in this case:
call set_energy (rism=0.0_r8_kind)
#endif
! ================ CONTEXT: MASTER ONLY ==================
if (rank == 0) then
! If QM gradients exist ... This was in the QM2 block above
! before the huge master-only block was split:
if (operations_integral) then
call say ("gradient_data_write_cartesians")
call gradient_data_write_cartesians (' Final Cartesian Gradients:', &
gradient_cartesian)
endif
if(moving_pc) then
call say ("PC_grad_to_cart")
call transform_PC_grad_to_cart()
if(print_pc_grad) call pc_grad_cart_write()
end if
if(moving_X_centers .or. moving_R_centers) then
call say ("X_grad_to_cart")
call transform_X_grad_to_cart()
if(print_X_grad .or. print_R_grad) call X_grad_cart_write()
end if
#ifdef WITH_EFP
if(moving_R_centers) then
call say ("repf_grad_to_cart")
call transform_repf_grad_to_cart()
if(print_R_grad) call repf_grad_cart_write()
end if
#endif
if(moving_Pol_centers) then
call say ("id_grad_to_cart")
call transform_id_grad_to_cart()
if(print_id_grad) call id_grad_cart_write()
end if
#ifdef WITH_EFP
if(operations_solvation_effect) then
if(moving_pc) then
call say ("X_solv_grad_to_cart")
call transform_X_solv_grad_to_cart()
if(output_solv_grads .and. print_pc_grad) call X_solv_grad_cart_write()
end if
end if
if(efp) then
call say ("efp_sum_up_grads")
call efp_sum_up_grads()
call efp_grad_cart_write()
end if
#endif
endif ! master only
! ================ CONTEXT: ALL PROCESSORS ===============
!
! FIXME: does it need to be here? Was the allocation initiated
! from this scope? Maybe finalize_geometry() is a better
! place?
!
! make it unconditional and deallocate only what is there
call post_scf_deallocate_grad_xc()
! ================ CONTEXT: MASTER ONLY ==================
master5: if (rank == 0) then
#ifdef WITH_MOLMECH
if(operations_qm_mm_new) then
#ifdef WITH_EFP
if(efp .and. operations_geo_opt) then
call get_energy (tot=energy)
call efp_write_gxfile (energy, gradient_cartesian)
endif
#endif
if(imomm) call qm_grads_to_qmmm()
if(qm_mm .or. qm_mm_1) call qm_grads_to_qmmm1()
else
#endif
if (operations_geo_opt .or. operations_qm_mm) then
call say ("call gradient_data_write_gxfile()")
call gradient_data_write_gxfile (loop)
call say ("done gradient_data_write_gxfile()")
end if
#ifdef WITH_MOLMECH
end if
#endif
endif master5
! ================ CONTEXT: ALL PROCESSORS ===============
DPRINT MyID,"main_gradient: call gradient_data_shutdown()"
!
! See matching gradient_data_setup() above:
!
call gradient_data_shutdown()
FPP_TIMER_STOP(tot)
#ifdef FPP_TIMERS
DPRINT MyID,'main_gradient: TIMING: total =',FPP_TIMER_VALUE(tot)
DPRINT MyID,'main_gradient: TIMING: |-int1 =',FPP_TIMER_VALUE(int1)
DPRINT MyID,'main_gradient: TIMING: |-rel =',FPP_TIMER_VALUE(rel)
DPRINT MyID,'main_gradient: TIMING: |-cpks =',FPP_TIMER_VALUE(cpks)
DPRINT MyID,'main_gradient: TIMING: |-int2 =',FPP_TIMER_VALUE(int2)
#endif
call say ("end")
DPRINT MyID,"main_gradient: end"
contains
#ifdef _DPRINT
subroutine print_cart_grads (header)
implicit none
character (len=*), intent(in), optional :: header
! *** end of interface ***
integer (i4_kind) :: u, e
if (present (header)) print *, header
print '(2A4,3A20)', 'ua', 'ea', 'x', 'y', 'z'
do u = 1, size (gradient_cartesian)
do e = 1, size (gradient_cartesian(u) % m, 2)
print '(2I4,3F20.10)', u, e, gradient_cartesian(u) % m(:, e)
enddo
enddo
end subroutine print_cart_grads
subroutine print_totsym_grads(header)
implicit none
character(len=*), intent(in), optional :: header
! *** end of interface ***
integer(i4_kind) :: n,a
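! Note: each row prints three totally symmetric components in the
! permuted order +2, +3, +1, matching the header below.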
if(present(header)) &
print*,header
print '(2A4,3A20)','from','to','+2','+3','+1'
n = size(gradient_totalsym)/3
do a=0,n-1
print '(2I4,3F20.10)',3*a+1,3*a+3,gradient_totalsym(3*a+2) &
,gradient_totalsym(3*a+3) &
,gradient_totalsym(3*a+1)
enddo
if( 3*n < size(gradient_totalsym) )&
print '(2I4,3F20.10)',3*n+1,3*n+3,gradient_totalsym(3*n+1:)
end subroutine print_totsym_grads
#endif
subroutine say (msg)