├── TGPT-PINN.png
├── TGPT-Reaction
├── __pycache__
│ ├── R_data.cpython-39.pyc
│ ├── R_PINN_train.cpython-39.pyc
│ ├── R_PINN_wav.cpython-39.pyc
│ ├── R_Plotting.cpython-39.pyc
│ ├── R_TGPT_PINN.cpython-39.pyc
│ ├── R_TGPT_train.cpython-39.pyc
│ └── R_TGPT_activation.cpython-39.pyc
├── R_PINN_train.py
├── R_TGPT_activation.py
├── R_PINN_wav.py
├── R_TGPT_train.py
├── R_Plotting.py
├── R_TGPT_PINN.py
├── R_data.py
└── main_TGPT_Reaction.py
├── TGPT-Transport
├── __pycache__
│ ├── T_PINN.cpython-39.pyc
│ ├── T_data.cpython-39.pyc
│ ├── T_GPT_PINN.cpython-39.pyc
│ ├── T_GPT_train.cpython-39.pyc
│ ├── T_Plotting.cpython-39.pyc
│ ├── T_TGPT_PINN.cpython-39.pyc
│ ├── T_PINN_train.cpython-39.pyc
│ ├── T_TGPT_train.cpython-39.pyc
│ ├── T_GPT_activation.cpython-39.pyc
│ ├── T_TGPT_activation.cpython-39.pyc
│ └── choice_widthindex.cpython-39.pyc
├── choice_widthindex.py
├── T_PINN_train.py
├── T_Plotting.py
├── T_TGPT_activation.py
├── T_TGPT_train.py
├── T_PINN.py
├── T_TGPT_PINN.py
├── T_data.py
└── main_TGPT_Trans.py
├── TGPT-function1
├── __pycache__
│ ├── F_data.cpython-39.pyc
│ ├── F_plot.cpython-39.pyc
│ ├── F_GPT_PINN.cpython-39.pyc
│ ├── F_GPT_train.cpython-39.pyc
│ ├── F_TGPT_PINN.cpython-39.pyc
│ ├── F_TGPT_train.cpython-39.pyc
│ ├── F_GPT_activation.cpython-39.pyc
│ └── F_TGPT_activation.cpython-39.pyc
├── F_TGPT_activation.py
├── F_TGPT_train.py
├── F_TGPT_PINN.py
└── main_TGPT_fun1.py
├── TGPT-function2
├── __pycache__
│ ├── T_plot.cpython-39.pyc
│ ├── kink_data.cpython-39.pyc
│ ├── kink_plot.cpython-39.pyc
│ ├── kink_TGPT_PINN.cpython-39.pyc
│ ├── kink_TGPT_train.cpython-39.pyc
│ ├── kink_TGPT_train1.cpython-39.pyc
│ ├── kink_TGPT_activation.cpython-39.pyc
│ ├── nonlinear_function_data.cpython-39.pyc
│ ├── nonlinear_function_plot.cpython-39.pyc
│ ├── nonlinear_function_GPT_PINN.cpython-39.pyc
│ ├── nonlinear_function_GPT_train.cpython-39.pyc
│ ├── nonlinear_function_plotting.cpython-39.pyc
│ ├── main_GPT_NN_nonlinear_function.cpython-39.pyc
│ ├── nonlinear_function_GPT_precomp.cpython-39.pyc
│ ├── nonlinear_function_GPT_activation.cpython-39.pyc
│ └── nonlinear_function_GPT_optimizer.cpython-39.pyc
├── Nonlinear Function.sln
├── kink_TGPT_activation.py
├── Nonlinear Function.pyproj
├── kink_TGPT_train.py
├── kink_TGPT_PINN.py
└── main_TGPT-PINN_kink.py
├── TGPT-function2d
├── __pycache__
│ ├── fun2d_data.cpython-39.pyc
│ ├── fun2d_TGPT_PINN.cpython-39.pyc
│ ├── fun2d_plotting.cpython-39.pyc
│ ├── fun2d_TGPT_train.cpython-39.pyc
│ ├── function2d_plotting.cpython-39.pyc
│ ├── fun2d_TGPT_activation.cpython-39.pyc
│ ├── nonlinear_function_data.cpython-39.pyc
│ ├── nonlinear_function_GPT_PINN.cpython-39.pyc
│ ├── nonlinear_function_plotting.cpython-39.pyc
│ ├── main_GPT_NN_nonlinear_function.cpython-39.pyc
│ ├── nonlinear_function_GPT_precomp.cpython-39.pyc
│ ├── nonlinear_function_GPT_train.cpython-39.pyc
│ ├── nonlinear_function_GPT_optimizer.cpython-39.pyc
│ └── nonlinear_function_GPT_activation.cpython-39.pyc
├── Nonlinear Function.sln
├── fun2d_TGPT_activation.py
├── fun2d_TGPT_train.py
├── fun2d_TGPT_PINN.py
├── Nonlinear Function.pyproj
└── main_TGPT_fun2d.py
├── TGPT-ReactionDiffusion
├── __pycache__
│ ├── RD_data.cpython-39.pyc
│ ├── RD_PINN_train.cpython-39.pyc
│ ├── RD_PINN_wav.cpython-39.pyc
│ ├── RD_Plotting.cpython-39.pyc
│ ├── RD_TGPT_PINN.cpython-39.pyc
│ ├── RD_TGPT_train.cpython-39.pyc
│ └── RD_TGPT_activation.cpython-39.pyc
├── RD_PINN_train.py
├── RD_TGPT_train.py
├── RD_Plotting.py
├── RD_TGPT_activation.py
├── RD_PINN_wav.py
├── RD_TGPT_PINN.py
├── RD_data.py
└── main_TGPT_ReactionDiffusion.py
└── README.md
/TGPT-PINN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-PINN.png
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_plot.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_plot.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/T_plot.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/T_plot.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_PINN_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_PINN_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_PINN_wav.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_PINN_wav.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_Plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_Plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_GPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_GPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_GPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_GPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_Plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_Plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_GPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_GPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_GPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_GPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_plot.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_plot.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/fun2d_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/fun2d_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_PINN_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_PINN_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Reaction/__pycache__/R_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Reaction/__pycache__/R_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_GPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_GPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_GPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_GPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_TGPT_train1.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_TGPT_train1.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/fun2d_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/fun2d_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/fun2d_plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/fun2d_plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/T_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/T_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/__pycache__/choice_widthindex.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-Transport/__pycache__/choice_widthindex.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function1/__pycache__/F_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function1/__pycache__/F_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/fun2d_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/fun2d_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_PINN_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_PINN_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_PINN_wav.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_PINN_wav.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_Plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_Plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/kink_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/kink_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/function2d_plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/function2d_plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_plot.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_plot.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/fun2d_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/fun2d_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-ReactionDiffusion/__pycache__/RD_TGPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_data.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_data.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_GPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_GPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_GPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_GPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_GPT_PINN.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_GPT_PINN.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/main_GPT_NN_nonlinear_function.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/main_GPT_NN_nonlinear_function.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_GPT_precomp.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_GPT_precomp.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/main_GPT_NN_nonlinear_function.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/main_GPT_NN_nonlinear_function.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_GPT_precomp.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_GPT_precomp.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_GPT_train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_GPT_train.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_GPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_GPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2/__pycache__/nonlinear_function_GPT_optimizer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2/__pycache__/nonlinear_function_GPT_optimizer.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_GPT_optimizer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_GPT_optimizer.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-function2d/__pycache__/nonlinear_function_GPT_activation.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DuktigYajie/TGPT-PINN/HEAD/TGPT-function2d/__pycache__/nonlinear_function_GPT_activation.cpython-39.pyc
--------------------------------------------------------------------------------
/TGPT-Transport/choice_widthindex.py:
--------------------------------------------------------------------------------
1 | import random
2 |
def random_point_in_diagonal_band(nu, N_x, width):
    """Sample a flattened (t, x) grid index, biased toward two diagonal bands.

    With probability ~0.8 the point is drawn from a band of the given width
    around one of the diagonals x = nu*t + c*(N_x - 1) (c = 3/4 or 1/4);
    half of those draws shrink the band to a quarter of the width.
    Otherwise (t, x) is uniform over the whole grid.

    Returns the flattened index (t - 1)*N_x + x - 1.
    """
    def band_x(center_frac, w):
        # One (t, x) sample from a band of width w around the diagonal
        # x = nu*t + center_frac*(N_x - 1), wrapped into [0, N_x).
        t = random.randint(0, N_x)
        x0 = round(nu * t + center_frac * (N_x - 1))
        # Bug fix: randint requires integer bounds; width/2 could be
        # fractional (e.g. after width = width/4) and raised ValueError.
        half = round(w / 2)
        return t, random.randint(x0 - half, x0 + half) % N_x

    r = random.uniform(0, 10)
    if r < 4:
        t, x = band_x(3 / 4, width / 4 if r < 2 else width)
    elif r < 8:
        t, x = band_x(1 / 4, width / 4 if r < 6 else width)
    else:
        x = random.randint(0, N_x)
        t = random.randint(0, N_x)
    return (t - 1) * N_x + x - 1
21 |
def random_point_in_initial(IC_pts, width):
    """Sample an initial-condition grid index, biased toward the points
    at 1/4 and 3/4 of the interval (~0.4 probability each), otherwise
    uniform over [0, IC_pts].

    Returns x - 1 so the result indexes a 0-based array.
    """
    r = random.uniform(0, 10)
    # Bug fix: randint requires integer bounds; (IC_pts-1)/4 and width/2
    # can be fractional and raised ValueError in the original code.
    half = round(width / 2)
    if r < 4:
        center = round((IC_pts - 1) / 4)
        x = random.randint(center - half, center + half)
    elif r < 8:
        center = round(3 * (IC_pts - 1) / 4)
        x = random.randint(center - half, center + half)
    else:
        x = random.randint(0, IC_pts)
    return x - 1
31 |
32 |
33 |
--------------------------------------------------------------------------------
/TGPT-function2/Nonlinear Function.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.0.32014.148
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Nonlinear Function", "Nonlinear Function.pyproj", "{D55228DD-8A33-428B-983E-04D0CE79D9F8}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Any CPU = Debug|Any CPU
11 | Release|Any CPU = Release|Any CPU
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {D55228DD-8A33-428B-983E-04D0CE79D9F8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
15 | {D55228DD-8A33-428B-983E-04D0CE79D9F8}.Release|Any CPU.ActiveCfg = Release|Any CPU
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | GlobalSection(ExtensibilityGlobals) = postSolution
21 | SolutionGuid = {65B7EB1F-B100-46C6-86CC-33ACB99AD301}
22 | EndGlobalSection
23 | EndGlobal
24 |
--------------------------------------------------------------------------------
/TGPT-function2d/Nonlinear Function.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.0.32014.148
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Nonlinear Function", "Nonlinear Function.pyproj", "{D55228DD-8A33-428B-983E-04D0CE79D9F8}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Any CPU = Debug|Any CPU
11 | Release|Any CPU = Release|Any CPU
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {D55228DD-8A33-428B-983E-04D0CE79D9F8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
15 | {D55228DD-8A33-428B-983E-04D0CE79D9F8}.Release|Any CPU.ActiveCfg = Release|Any CPU
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | GlobalSection(ExtensibilityGlobals) = postSolution
21 | SolutionGuid = {65B7EB1F-B100-46C6-86CC-33ACB99AD301}
22 | EndGlobalSection
23 | EndGlobal
24 |
--------------------------------------------------------------------------------
/TGPT-function2d/fun2d_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | torch.set_default_dtype(torch.float)
4 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
5 |
6 |
class P(nn.Module):
    """TGPT-PINN activation: wraps a pre-trained network ``P_func_nu`` and
    applies it as a fixed nonlinearity in the forward pass."""

    def __init__(self, P_func_nu):
        super().__init__()
        # Two bias-free 1->1 linear layers are kept for interface
        # compatibility; the current forward pass bypasses them.
        self.layers = [1, 1, 1]
        self.linears = nn.ModuleList(
            nn.Linear(n_in, n_out, bias=False)
            for n_in, n_out in zip(self.layers[:-1], self.layers[1:])
        )
        self.activation = P_func_nu

    def forward(self, x):
        # Evaluate the wrapped network directly on the device-moved input.
        return self.activation(x.to(device))
28 |
--------------------------------------------------------------------------------
/TGPT-function1/F_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
class P(nn.Module):
    """TGPT-PINN activation function.

    Holds a pre-trained network ``P_func_nu`` and uses it as a fixed
    nonlinear activation; the forward pass is a direct evaluation.
    """

    def __init__(self, P_func_nu):
        super().__init__()
        self.layers = [1, 1, 1]
        # Bias-free 1->1 linears retained for interface compatibility;
        # they are not used in the current forward pass.
        pairs = zip(self.layers[:-1], self.layers[1:])
        self.linears = nn.ModuleList(
            [nn.Linear(a, b, bias=False) for a, b in pairs]
        )
        self.activation = P_func_nu

    def forward(self, x):
        # Apply the wrapped network to the input on the target device.
        return self.activation(x.to(device))
28 |
--------------------------------------------------------------------------------
/TGPT-function2/kink_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
class P(nn.Module):
    """GPT-PINN activation wrapper.

    Stores the pre-trained network ``P_func_nu`` and applies it as a
    fixed activation in ``forward``.
    """

    def __init__(self, P_func_nu):
        super().__init__()
        # Layer widths and two bias-free 1->1 linears kept for interface
        # compatibility; forward currently skips them.
        self.layers = [1, 1, 1]
        self.linears = nn.ModuleList(
            nn.Linear(self.layers[i], self.layers[i + 1], bias=False)
            for i in range(2)
        )
        self.activation = P_func_nu

    def forward(self, x):
        # Direct evaluation of the wrapped network on the device-moved input.
        return self.activation(x.to(device))
28 |
--------------------------------------------------------------------------------
/TGPT-function2d/fun2d_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, layers_gpt, nu, P, train_xy, u_exact, epochs_gpt, lr_gpt, tol_gpt):
    """Adam-train the 2-D TGPT-PINN until the loss tolerance or epoch budget.

    Returns (loss_values, losses, ep): last loss tensor, sampled loss values
    (every 50 epochs plus start/stop), and the epochs they were sampled at.
    """
    optimizer = torch.optim.Adam(TGPT_PINN.parameters(), lr=lr_gpt)

    loss_values = TGPT_PINN.loss()
    losses = [loss_values.item()]
    ep = [0]

    for epoch in range(1, epochs_gpt + 1):
        # Early exit once the loss target is reached.
        if loss_values < tol_gpt:
            losses.append(loss_values.item())
            ep.append(epoch)
            print(f'{nu} stopped at epoch: {epoch} | gpt_loss: {loss_values.item()} (TGPT_PINN Stopping Criteria Met)\n')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()
        loss_values = TGPT_PINN.loss()

        # Sample the loss history every 50 epochs and at the final epoch.
        if epoch % 50 == 0 or epoch == epochs_gpt:
            losses.append(loss_values.item())
            ep.append(epoch)
        if epoch == epochs_gpt:
            print(f"{nu} stopped at epoch: {epoch} | gpt_loss: {loss_values.item()} (TGPT-PINN Training Completed)\n")

    return loss_values, losses, ep
--------------------------------------------------------------------------------
/TGPT-function1/F_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, layers_gpt, nu, P, train_x, epochs_gpt, lr_gpt, tol_gpt):
    """SGD-train the 1-D function TGPT-PINN until tolerance or epoch budget.

    Returns (loss_values, losses, ep): last loss tensor, sampled losses
    (every 500 epochs plus start/stop), and the matching epoch indices.
    """
    optimizer = torch.optim.SGD(TGPT_PINN.parameters(), lr=lr_gpt)

    loss_values = TGPT_PINN.loss()
    losses = [loss_values.item()]
    ep = [0]

    for epoch in range(1, epochs_gpt + 1):
        # Stop as soon as the tolerance is met.
        if loss_values < tol_gpt:
            losses.append(loss_values.item())
            ep.append(epoch)
            print(f'{nu} stopped at epoch: {epoch} | gpt_loss: {loss_values.item()} (TGPT_PINN Stopping Criteria Met)\n')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()
        loss_values = TGPT_PINN.loss()

        # Record every 500 epochs and at the final epoch.
        if epoch % 500 == 0 or epoch == epochs_gpt:
            losses.append(loss_values.item())
            ep.append(epoch)
        if epoch == epochs_gpt:
            print(f"{nu} stopped at epoch: {epoch} | gpt_loss: {loss_values.item()} (TGPT-PINN Training Completed)\n")

    return loss_values, losses, ep
--------------------------------------------------------------------------------
/TGPT-Transport/T_PINN_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def pinn_train(PINN, nu, xt_resid, IC_xt, IC_u, BC1, BC2,
               f_hat, epochs_pinn, lr_pinn, tol, xt_test):
    """Adam-train the full transport PINN and log its loss history.

    Returns (losses, ep, loss_values): sampled loss values, the epochs at
    which they were sampled, and the last computed loss tensor.
    """
    losses = [PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat).item()]
    ep = [0]
    optimizer = torch.optim.Adam(PINN.parameters(), lr=lr_pinn)

    print(f"Epoch: 0 | Loss: {losses[0]}")
    for epoch in range(1, epochs_pinn + 1):
        loss_values = PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat)

        # Early exit once the loss target is reached.
        if loss_values.item() < tol:
            losses.append(loss_values.item())
            ep.append(epoch)
            print(f'Epoch: {epoch} | Loss: {loss_values.item()} (Stopping Criteria Met)')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()

        # Record every 1000 epochs; report every 5000 and at the end.
        if epoch % 1000 == 0 or epoch == epochs_pinn:
            losses.append(loss_values.item())
            ep.append(epoch)
        if epoch % 5000 == 0 or epoch == epochs_pinn:
            print(f'Epoch: {epoch} | loss: {loss_values.item()}')
        if epoch == epochs_pinn:
            print("PINN Training Completed\n")

    return losses, ep, loss_values
34 |
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_PINN_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def pinn_train(PINN, xt_resid, IC_xt, IC_u, BC1, BC2,
               f_hat, epochs_pinn, lr_pinn, tol):
    """Adam-train the reaction-diffusion PINN and log its loss history.

    Returns (losses, ep): sampled loss values and their epoch indices.
    """
    losses = [PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat).item()]
    ep = [0]
    optimizer = torch.optim.Adam(PINN.parameters(), lr=lr_pinn)

    print(f"Epoch: 0 | Loss: {losses[0]}")
    for epoch in range(1, epochs_pinn + 1):
        loss_values = PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat)

        # Stop as soon as the tolerance is reached.
        if loss_values.item() < tol:
            losses.append(loss_values.item())
            ep.append(epoch)
            print(f'Epoch: {epoch} | Loss: {loss_values.item()} (Stopping Criteria Met)')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()

        # Record every 5000 epochs; report every 10000 and at the end.
        if epoch % 5000 == 0 or epoch == epochs_pinn:
            losses.append(loss_values.item())
            ep.append(epoch)
        if epoch % 10000 == 0 or epoch == epochs_pinn:
            print(f'Epoch: {epoch} | loss: {loss_values.item()}')
        if epoch == epochs_pinn:
            print("PINN Training Completed\n")

    return losses, ep
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Transformed Generative Pre-Trained Physics-Informed Neural Networks
Transformed Generative Pre-Trained Physics-Informed Neural Networks (TGPT-PINN) is a framework that extends Physics-Informed Neural Networks (PINNs) and reduced basis methods (RBM) to the nonlinear model reduction regime while maintaining the type of network structure and the unsupervised nature of its learning.
3 |
4 |
5 | # TGPT-PINN: Nonlinear model reduction with transformed GPT-PINNs
6 | Yanlai Chen, Yajie Ji, Akil Narayan, Zhenli Xu
7 |
8 | Paper Links:
9 | [arXiv](https://arxiv.org/abs/2403.03459) | [ResearchGate](https://www.researchgate.net/publication/379189909_TGPT-PINN_Nonlinear_model_reduction_with_transformed_GPT-PINNs)
10 | --------------------
11 |
12 | Talk/Presentation:
13 | [YouTube](https://www.youtube.com/watch?v=ODA9Po4FVWA)
14 | ---------------
15 |
16 | # TGPT-PINN Architecture
17 | 
18 |
19 | # Citation:
20 | Below you can find the Bibtex citation:
21 |
22 |
23 | @article{chen2024tgpt,
24 | title={TGPT-PINN: Nonlinear model reduction with transformed GPT-PINNs},
25 | author={Chen, Yanlai and Ji, Yajie and Narayan, Akil and Xu, Zhenli},
26 | journal={arXiv preprint arXiv:2403.03459},
27 | year={2024}
28 | }
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/TGPT-Reaction/R_PINN_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def pinn_train(PINN, nu, xt_resid, IC_xt, IC_u, BC1, BC2,
               f_hat, epochs_pinn, lr_pinn, tol):
    """Adam-train the reaction PINN and log its loss history.

    Returns (losses, ep): sampled loss values and their epoch indices.
    """
    losses = [PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat).item()]
    ep = [0]
    optimizer = torch.optim.Adam(PINN.parameters(), lr=lr_pinn)

    print(f"Epoch: 0 | Loss: {losses[0]}")
    for epoch in range(1, epochs_pinn + 1):
        loss_values = PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat)

        # Early exit once the loss target is reached.
        if loss_values.item() < tol:
            losses.append(loss_values.item())
            ep.append(epoch)
            print(f'Epoch: {epoch} | Loss: {loss_values.item()} (Stopping Criteria Met)')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()

        # Record every 1000 epochs; report every 5000 and at the end.
        if epoch % 1000 == 0 or epoch == epochs_pinn:
            losses.append(loss_values.item())
            ep.append(epoch)
        if epoch % 5000 == 0 or epoch == epochs_pinn:
            print(f'Epoch: {epoch} | loss: {loss_values.item()}')
        if epoch == epochs_pinn:
            print("PINN Training Completed\n")

    # Final loss recomputation kept for parity with the original code
    # (its value is discarded; only the recorded history is returned).
    loss_values = PINN.loss(xt_resid, IC_xt, IC_u, BC1, BC2, f_hat)

    return losses, ep
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, nu, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_gpt, lr_gpt,
              largest_loss=None, largest_case=None, testing=False):
    """Train the RD TGPT-PINN in one of two modes.

    Greedy mode (testing == False): train only until the loss drops below the
    current worst-case loss; if the full budget is exhausted, this (nu, rho)
    becomes the new worst case.  Returns (largest_loss, largest_case).

    Testing mode (testing truthy): train for the full budget and log the loss
    every 200 epochs.  Returns (loss_values, losses, ep).
    """
    optimizer = torch.optim.Adam(TGPT_PINN.parameters(), lr=lr_gpt)

    if testing == False:
        loss_values = TGPT_PINN.loss()
        for epoch in range(1, epochs_gpt + 1):
            # Already better than the current worst case — nothing to update.
            if loss_values < largest_loss:
                break
            optimizer.zero_grad()
            loss_values.backward()
            optimizer.step()
            if epoch == epochs_gpt:
                # Budget exhausted without beating the worst case:
                # this parameter pair is the new greedy pick.
                largest_case = [nu, rho]
                largest_loss = TGPT_PINN.loss()
            loss_values = TGPT_PINN.loss()

        return largest_loss, largest_case

    elif testing:
        loss_values = TGPT_PINN.loss()
        losses = [loss_values.item()]
        ep = [0]
        for epoch in range(1, epochs_gpt + 1):
            optimizer.zero_grad()
            loss_values.backward()
            optimizer.step()
            loss_values = TGPT_PINN.loss()
            if epoch % 200 == 0 or epoch == epochs_gpt:
                losses.append(loss_values.item())
                ep.append(epoch)

        return loss_values, losses, ep
--------------------------------------------------------------------------------
/TGPT-function1/F_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import torch
3 | import torch.nn as nn
4 | import numpy as np
5 | from torch import pi
6 | torch.set_default_dtype(torch.float)
7 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
8 |
9 |
class GPT(nn.Module):
    """TGPT-PINN reduced network for the 1-D function approximation problem.

    Each hidden "neuron" is a frozen pre-trained network (self.activation[i])
    composed with a trainable affine transform of the input; the output layer
    linearly combines the neuron outputs with trainable coefficients.
    """

    def __init__(self, layers, nu, P, nu_neurons, initial_c, x_data, u_exact):
        super().__init__()
        self.layers = layers
        self.nu = nu
        self.nu_neurons = nu_neurons
        self.activation = P

        self.loss_function = nn.MSELoss(reduction='mean').to(device)
        # One trainable affine transform per neuron, plus the output layer.
        transforms = [nn.Linear(layers[0], layers[0]) for _ in range(self.layers[1])]
        self.linears = nn.ModuleList(transforms + [nn.Linear(layers[1], layers[2], bias=False)])

        self.x_data = x_data.to(device)
        self.u_exact = u_exact.to(device)

        # Start every neuron from the identity transform.
        for i in range(self.layers[1]):
            self.linears[i].weight.data = torch.eye(self.layers[0])
            self.linears[i].bias.data = torch.zeros(self.layers[0])

        self.linears[-1].weight.data = initial_c

    def forward(self):
        """Evaluate the reduced network on the stored training grid."""
        columns = torch.Tensor().to(device)
        inputs = self.x_data.float()
        for i in range(0, self.layers[-2]):
            transformed = self.linears[i](inputs)
            # Each frozen network contributes one output column.
            columns = torch.cat((columns, self.activation[i](transformed[:, :1])), 1)
        return self.linears[-1](columns)

    def loss(self):
        """Mean-squared error against the exact solution."""
        return self.loss_function(self.forward(), self.u_exact)
48 |
49 |
50 |
--------------------------------------------------------------------------------
/TGPT-Transport/T_Plotting.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import torch
4 | import scienceplots
5 | plt.style.use(['science', 'notebook'])
6 |
def Trans_plot(xt, u, scale=150, cmap="rainbow", title=None,
               dpi=150, figsize=(10,8)):
    """Filled contour plot of u over the (t, x) grid in xt.

    Assumes the points form a square grid; the side length is inferred
    from the number of samples.
    """
    side = int(np.sqrt(u.shape[0]))
    shape = [side, side]

    x = xt[:, 0].reshape(shape=shape).transpose(1, 0).cpu().detach()
    t = xt[:, 1].reshape(shape=shape).transpose(1, 0).cpu().detach()
    z = u.reshape(shape=shape).transpose(1, 0).cpu().detach()

    fig, ax = plt.subplots(dpi=dpi, figsize=figsize)
    contour = ax.contourf(t, x, z, scale, cmap=cmap)
    cbar = fig.colorbar(contour)
    cbar.ax.tick_params(labelsize=17.5)

    ax.set_xlabel("$t$", fontsize=25)
    ax.set_ylabel("$x$", fontsize=25)
    # Transport problem domain: t in [0, 2], x in [-1, 1].
    ax.set_xticks([0, 0.5, 1.0, 1.5, 2.0])
    ax.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
    ax.tick_params(axis='both', which='major', labelsize=22.5)
    ax.tick_params(axis='both', which='minor', labelsize=22.5)

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
34 |
def loss_plot(epochs, losses, title=None, dpi=150, figsize=(10,8)):
    """Training losses"""
    # Semilog-y plot of the recorded loss history.
    fig = plt.figure(dpi=dpi, figsize=figsize)
    ax = fig.gca()
    ax.plot(epochs, losses, c="k", linewidth=3)

    ax.set_xlabel("Epoch", fontsize=20)
    ax.set_ylabel("Loss", fontsize=20)

    ax.grid(True)
    ax.set_xlim(0, max(epochs))
    ax.set_yscale('log')
    ax.tick_params(axis='both', labelsize=22.5)
    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
51 |
52 |
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_Plotting.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import torch
4 | from torch import zeros
5 | import scienceplots
6 | plt.style.use(['science', 'notebook'])
7 |
def RD_plot(xt, u, scale=150, cmap="rainbow", title=None,
            dpi=150, figsize=(10,8)):
    """Filled contour plot of u over the (t, x) grid for reaction-diffusion.

    Assumes the points form a square grid; the side length is inferred
    from the number of samples.
    """
    side = int(np.sqrt(u.shape[0]))
    shape = [side, side]

    x = xt[:, 0].reshape(shape=shape).transpose(1, 0).cpu().detach()
    t = xt[:, 1].reshape(shape=shape).transpose(1, 0).cpu().detach()
    z = u.reshape(shape=shape).transpose(1, 0).cpu().detach()

    fig, ax = plt.subplots(dpi=dpi, figsize=figsize)
    contour = ax.contourf(t, x, z, scale, cmap=cmap)
    fig.colorbar(contour)

    ax.set_xlabel("$t$", fontsize=25)
    ax.set_ylabel("$x$", fontsize=25)
    # t in [0, 1]; x ticks at multiples of pi/2 on [0, 2*pi].
    ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.set_yticks([0, round(np.pi/2,3), round(np.pi,3), round(3*np.pi/2,3), round(2*np.pi,3)])
    ax.tick_params(axis='both', which='major', labelsize=22.5)
    ax.tick_params(axis='both', which='minor', labelsize=22.5)

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
34 |
35 |
def loss_plot(epochs, losses, title=None, dpi=150, figsize=(10,8)):
    """Training losses"""
    # Semilog-y plot of the recorded loss history.
    fig = plt.figure(dpi=dpi, figsize=figsize)
    ax = fig.gca()
    ax.plot(epochs, losses, c="k", linewidth=3)

    ax.set_xlabel("Epoch", fontsize=20)
    ax.set_ylabel("Loss", fontsize=20)

    ax.grid(True)
    ax.set_xlim(0, max(epochs))
    ax.set_yscale('log')

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
52 |
--------------------------------------------------------------------------------
/TGPT-Transport/T_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | torch.set_default_dtype(torch.float)
4 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
5 |
class P(nn.Module):
    """Frozen pre-trained PINN reused as a TGPT-PINN activation function.

    The four layers' weights/biases are copied from a trained network and
    frozen; forward() is a plain tanh MLP pass through them.
    """

    def __init__(self, layers, w1, w2, w3, w4, b1, b2, b3, b4):
        super().__init__()
        self.layers = layers
        self.linears = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(self.layers, self.layers[1:])]
        )

        weights = (torch.Tensor(w1).clone(),
                   torch.Tensor(w2).clone(),
                   torch.Tensor(w3).clone(),
                   torch.Tensor(w4).clone().view(1, self.layers[3]))
        biases = (torch.Tensor(b1).clone(),
                  torch.Tensor(b2).clone(),
                  torch.Tensor(b3).clone(),
                  torch.Tensor(b4).clone().view(-1))

        # Copy the pre-trained parameters in and freeze every layer: the
        # network acts as a fixed basis function inside the TGPT-PINN.
        for idx in range(4):
            self.linears[idx].weight.data = weights[idx]
            self.linears[idx].weight.requires_grad = False
            self.linears[idx].bias.data = biases[idx]
            self.linears[idx].bias.requires_grad = False

        self.activation = nn.Tanh()

    def forward(self, x):
        """TGPT-PINN Activation Function"""
        out = x
        for hidden in self.linears[:-1]:
            out = self.activation(hidden(out))
        return self.linears[-1](out)
42 |
--------------------------------------------------------------------------------
/TGPT-function2d/fun2d_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import torch
3 | import torch.nn as nn
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
7 |
class GPT(nn.Module):
    """TGPT-PINN reduced network for a 2-D function-approximation problem.

    Each hidden neuron evaluates a frozen pre-trained network P[i] on a
    trainable affine transform of the (x, y) input; the final linear layer
    combines the neuron outputs with trainable coefficients.
    """
    def __init__(self, layers, nu, P, initial_c, initial_s, xy_data, u_exact):
        super().__init__()
        self.layers = layers          # layer widths; layers[2] = number of neurons
        self.nu = nu                  # current parameter value (not used in forward)
        self.activation = P           # indexable collection of pre-trained networks

        self.loss_function = nn.MSELoss(reduction='mean').to(device)
        # linears[0]/linears[1]: per-coordinate transforms of x and y;
        # linears[2]: trainable output combination (no bias).
        self.linears = nn.ModuleList([nn.Linear(layers[0], layers[1]),nn.Linear(layers[1], layers[2]),nn.Linear(layers[2], layers[3],bias=False)])
        self.xy_data = xy_data.to(device)
        self.u_exact = u_exact.to(device)

        # NOTE(review): linears[1] is declared as Linear(layers[1], layers[2]) but
        # is assigned a weight of shape (layers[1], layers[0]) and is applied to a
        # 1-column input in forward(); this is consistent only for particular
        # layer-width combinations — confirm the intended sizes.
        self.linears[0].weight.data = torch.ones(self.layers[1], self.layers[0])
        self.linears[0].bias.data=initial_s
        self.linears[1].weight.data = torch.ones(self.layers[1], self.layers[0])
        self.linears[1].bias.data=initial_s
        self.linears[2].weight.data = initial_c


    def forward(self, data):
        # Neuron outputs are accumulated as columns of `a`.
        a = torch.Tensor().to(device)
        # Affine transforms of the x and y coordinates (one column per neuron).
        x = self.linears[0](data[:,0].reshape(data[:,0].shape[0],1))
        y = self.linears[1](data[:,1].reshape(data[:,1].shape[0],1))
        xy=torch.cat((x,y),1)
        for i in range(0, self.layers[2]):
            # NOTE(review): for i >= 1 the strided slice 0:2*(i+1):i+1 picks
            # columns 0 and i+1 of xy (for i = 0, columns 0 and 1); verify this
            # matches the intended (x_i, y_i) pairing for each neuron.
            a = torch.cat((a, self.activation[i](xy[:,0:2*(i+1):i+1]).reshape(data.shape[0],1)), 1)
        final_output = self.linears[-1](a)

        return final_output

    def loss(self):
        """Mean-squared error of the reduced network against u_exact."""
        u = self.forward(self.xy_data)
        loss_u = self.loss_function(u, self.u_exact)

        return loss_u
43 |
44 |
--------------------------------------------------------------------------------
/TGPT-function2/Nonlinear Function.pyproj:
--------------------------------------------------------------------------------
1 |
2 |
3 | Debug
4 | 2.0
5 | d55228dd-8a33-428b-983e-04d0ce79d9f8
6 | .
7 | main_GPT_NN_nonlinear_function.py
8 |
9 |
10 | .
11 | .
12 | Nonlinear Function
13 | Nonlinear Function
14 |
15 |
16 | true
17 | false
18 |
19 |
20 | true
21 | false
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/TGPT-function2d/Nonlinear Function.pyproj:
--------------------------------------------------------------------------------
1 |
2 |
3 | Debug
4 | 2.0
5 | d55228dd-8a33-428b-983e-04d0ce79d9f8
6 | .
7 | main_GPT_NN_nonlinear_function.py
8 |
9 |
10 | .
11 | .
12 | Nonlinear Function
13 | Nonlinear Function
14 |
15 |
16 | true
17 | false
18 |
19 |
20 | true
21 | false
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/TGPT-Transport/T_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, nu, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_gpt, lr_gpt, tol_gpt):
    """Adam-train the transport TGPT-PINN with transform-layer normalization.

    After each step the first transform layer's weight row (w_x, w_t) is
    rescaled so w_x == 1, and w_t is forced to the sign opposite to nu.
    The learning rate decays by 10x each time the loss crosses a moving
    threshold (starting at 100 * tol_gpt).

    Returns (loss_values, losses, ep): last loss tensor, sampled losses
    (every 500 epochs plus start/stop), and the matching epoch indices.
    """
    optimizer = torch.optim.Adam(TGPT_PINN.parameters(), lr=lr_gpt)

    losses = [TGPT_PINN.loss().item()]
    ep = [0]

    change_tol = tol_gpt * 100

    for i in range(1, epochs_gpt + 1):
        loss_values = TGPT_PINN.loss()

        if loss_values.item() < tol_gpt:
            losses.append(loss_values.item())
            ep.append(i)
            print(f'{round(nu,3)} stopped at epoch: {i} | Loss: {loss_values.item()} (TGPT_PINN Stopping Criteria Met)')
            break

        optimizer.zero_grad()
        loss_values.backward()
        optimizer.step()

        # Normalize the transform so its x-coefficient is exactly 1, rescaling
        # the t-coefficient by the ORIGINAL x-coefficient.  (BUGFIX: the old
        # code overwrote w[0,0] first, so the second division was by 1 and the
        # t-coefficient was never rescaled.)
        w = TGPT_PINN.linears[0].weight.data
        w00 = w[0, 0].clone()
        w[0, 0] = w[0, 0] / w00
        w[0, 1] = w[0, 1] / w00
        # The transport speed nu and the t-coefficient must have opposite signs.
        if nu * w[0, 1] > 0:
            w[0, 1] = -w[0, 1]

        if (i % 500 == 0) or (i == epochs_gpt):
            losses.append(loss_values.item())
            ep.append(i)
        if (i % 5000 == 0) or (i == epochs_gpt):
            print(f'{round(nu,3)} stopped at epoch: {i} | gpt_loss: {loss_values.item()}')
        if i == epochs_gpt:
            print("TGPT-PINN Training Completed")

        if loss_values.item() < change_tol:
            # Decay the step size and push it into the optimizer.  (BUGFIX:
            # the old code only rebound the local lr_gpt, which Adam ignores.)
            lr_gpt = 0.1 * lr_gpt
            for group in optimizer.param_groups:
                group['lr'] = lr_gpt
            change_tol = 0.1 * change_tol

    return loss_values, losses, ep
48 |
--------------------------------------------------------------------------------
/TGPT-Transport/T_PINN.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.autograd as autograd
3 | import torch.nn as nn
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
class NN(nn.Module):
    """Full PINN for the 1-D transport equation u_t + nu * u_x = 0.

    layers : layer widths, e.g. [2, 20, 20, 1] mapping (x, t) -> u.
    nu     : transport speed entering the residual.
    """

    def __init__(self, layers, nu):
        super().__init__()
        self.layers = layers
        self.nu = nu
        self.loss_function = nn.MSELoss(reduction='mean')
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])

        # Xavier-normal weights, zero biases.
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data)
            nn.init.zeros_(self.linears[i].bias.data)

        self.activation = nn.Tanh()

    def forward(self, x):
        """Tanh MLP forward pass: hidden layers activated, output layer linear."""
        a = x.float()
        for i in range(0, len(self.layers)-2):
            z = self.linears[i](a)
            a = self.activation(z)
        a = self.linears[-1](a)
        return a

    def lossR(self, xt_residual, f_hat):
        """Residual loss: MSE of the damped PDE residual against f_hat."""
        g = xt_residual.clone().requires_grad_()

        u = self.forward(g)
        u_xt = autograd.grad(u, g, torch.ones(g.shape[0], 1).to(device), create_graph=True)[0]

        u_x = u_xt[:, [0]]
        u_t = u_xt[:, [1]]

        # Transport residual u_t + nu*u_x, damped by d = 0.1*|u_x| + 1 so
        # steep-gradient regions do not dominate.  (An unused duplicate
        # computation of the residual, f0, was removed.)
        f = u_t + self.nu * u_x
        d = 0.1 * (abs(u_x)) + 1

        return self.loss_function(f / d, f_hat)

    def lossIC(self, IC_xt, IC_u):
        """Initial condition loss function"""
        loss_IC = self.loss_function(self.forward(IC_xt), IC_u)
        return loss_IC

    def lossBC(self, BC1, BC2):
        """Periodic boundary condition loss function"""
        loss_BC = self.loss_function(self.forward(BC1), self.forward(BC2))
        return loss_BC

    def loss(self, xt_resid, IC_xt, IC_u, BC1, BC2, f_hat):
        """Total loss: residual + initial-condition + boundary-condition terms."""
        loss_R = self.lossR(xt_resid, f_hat)
        loss_IC = self.lossIC(IC_xt, IC_u)
        loss_BC = self.lossBC(BC1, BC2)
        return loss_R + loss_IC + loss_BC
63 |
--------------------------------------------------------------------------------
/TGPT-function2/kink_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, layers_gpt, nu, epochs_gpt, lr_gpt, x_tol_gpt, u_tol_gpt):
    """Two-phase training of the kink TGPT-PINN.

    Phase 1 minimizes loss_x (transform alignment); phase 2 minimizes loss_u
    (output fit) while nulling the transform layers' gradients so only the
    output coefficients move.  Returns
    (x_loss_values, u_loss_values, x_losses, u_losses, x_ep, u_ep).
    """
    optimizer = torch.optim.Adam(TGPT_PINN.parameters(), lr=lr_gpt)

    x_loss_values = TGPT_PINN.loss_x()
    u_loss_values = TGPT_PINN.loss_u()
    x_losses = [x_loss_values.item()]
    u_losses = [u_loss_values.item()]
    x_ep = [0]
    u_ep = [0]

    # Phase 1: fit the transform layers against the x-loss.
    for step in range(1, epochs_gpt + 1):
        if x_loss_values < x_tol_gpt:
            x_losses.append(x_loss_values.item())
            x_ep.append(step)
            break
        optimizer.zero_grad()
        x_loss_values.backward()
        optimizer.step()
        x_loss_values = TGPT_PINN.loss_x()
        if step % 50 == 0 or step == epochs_gpt:
            x_losses.append(x_loss_values.item())
            x_ep.append(step)

    # Phase 2: fit the output coefficients; transform-layer grads are nulled
    # so Adam leaves those parameters untouched.
    for step in range(1, epochs_gpt + 1):
        if u_loss_values < u_tol_gpt:
            u_losses.append(u_loss_values.item())
            u_ep.append(step)
            break
        optimizer.zero_grad()
        u_loss_values.backward()
        for j in range(0, layers_gpt[1]):
            TGPT_PINN.linears[j].weight.grad = None
            TGPT_PINN.linears[j].bias.grad = None
        optimizer.step()
        u_loss_values = TGPT_PINN.loss_u()
        if step % 50 == 0 or step == epochs_gpt:
            u_losses.append(u_loss_values.item())
            u_ep.append(step)

    # Final recomputation of both losses for reporting and the return value.
    x_loss_values = TGPT_PINN.loss_x()
    u_loss_values = TGPT_PINN.loss_u()
    print(f"{nu} stopped at epoch: {step} | x_loss: {x_loss_values.item()} and u_loss:{u_loss_values.item()}\n")
    return x_loss_values, u_loss_values, x_losses, u_losses, x_ep, u_ep
60 |
--------------------------------------------------------------------------------
/TGPT-function2/kink_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import torch
3 | import torch.nn as nn
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
7 |
class GPT(nn.Module):
    """TGPT-PINN reduced network for the kink test function.

    One trainable affine transform per neuron feeds a frozen pre-trained
    network (self.activation[i]); the last linear layer holds the output
    coefficients.  Trained in two phases: loss_x aligns the transformed
    location (nu + 0.4)/2 with each stored neuron's target, loss_u fits the
    output coefficients.
    """
    def __init__(self, layers, nu, P,nu_neurons, initial_c, x_data, u_exact):
        super().__init__()
        self.layers = layers
        self.nu = nu                      # current parameter value
        self.nu_neurons = nu_neurons      # parameter values of the stored neurons
        self.activation = P               # indexable collection of frozen networks

        self.loss_function = nn.MSELoss().to(device)
        # One affine transform per neuron plus the output layer (no bias).
        self.linears = nn.ModuleList([nn.Linear(layers[0], layers[0]) for i in range(self.layers[1])]+[nn.Linear(layers[1], layers[2],bias=False)])


        self.x_data = x_data.to(device)
        self.u_exact = u_exact.to(device)


        # Identity transform as the starting point for every neuron.
        for i in range(self.layers[1]):
            self.linears[i].weight.data = torch.eye(self.layers[0])
            self.linears[i].bias.data = torch.zeros(self.layers[0])

        self.linears[-1].weight.data = initial_c

        # Per-neuron targets (nu_neurons + 0.4)/2 — presumably the kink
        # locations of the stored snapshots; shape (layers[1], 1).
        self.cut_now = torch.div(torch.add(self.nu_neurons,0.4),2).reshape([layers[1],1]).to(device)

    def forward(self,datatype):
        # 'u_loss': evaluate the reduced network on x_data.  The transform
        # output is detached, so gradients from loss_u do not reach the
        # transform layers — only linears[-1] receives them.
        if datatype == 'u_loss':
            a = torch.Tensor().to(device)
            for i in range(0, self.layers[1]):
                z = self.linears[i](self.x_data).detach()
                a = torch.cat((a, self.activation[i](z[:,:1])), 1)
            output = self.linears[-1](a)

        # 'x_loss': apply each neuron's transform to the value (nu + 0.4)/2
        # and return the column of transformed values (compared against
        # cut_now by loss_x).
        if datatype == 'x_loss':
            cut_data = torch.full([self.layers[1],1],(self.nu+0.4)/2).to(device)
            x_cut = torch.zeros(self.layers[1],1).to(device)
            for i in range(0, self.layers[1]):
                x_cut[i] = self.linears[i](cut_data[i]).to(device)
            output= x_cut
        return output

    def loss_x(self):
        """MSE between the transformed values and the per-neuron targets."""
        x_cut = self.forward(datatype = 'x_loss')
        loss_x=self.loss_function(x_cut, self.cut_now)
        return loss_x

    def loss_u(self):
        """MSE between the reduced-network output and u_exact."""
        u = self.forward(datatype = 'u_loss')
        loss_u=self.loss_function(u, self.u_exact)
        return loss_u
57 |
--------------------------------------------------------------------------------
/TGPT-function1/main_TGPT_fun1.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | #import os
6 | import time
7 | from functools import partial
8 |
9 | # TGPT-NN
10 | from F_TGPT_activation import P
11 | from F_TGPT_PINN import GPT
12 | from F_TGPT_train import gpt_train
13 |
14 | torch.set_default_dtype(torch.float)
15 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
16 | print(f'Current Device: {device}')
17 | if torch.cuda.is_available():
18 | print(f'Current Device Name: {torch.cuda.get_device_name()}')
19 |
def exact_u(nu, x):
    """Exact target u(x; nu) = sin(x + nu), used as both data and activation."""
    # Alternative targets tried during development (|x + nu|, relu(sin)) were
    # removed; only the sinusoid is active.
    return torch.sin(x + nu)
25 |
# Domain: uniform grid of N_train points on [-pi, pi], shaped (N_train, 1).
Xi, Xf = -np.pi, np.pi
N_train = 201
train_x = torch.linspace(Xi, Xf, N_train)[:,None]

# Training Parameter Set: 11 equally spaced shifts nu in [-5, 5].
number_of_parameters = 11
nu_training = np.linspace(-5,5, number_of_parameters)

# TGPT-PINN Setting
lr_tgpt = 0.01
epochs_tgpt = 10000
tol_tgpt = 1e-12

# One input, one transformed neuron, one output coefficient.
layers_gpt = np.array([1, 1, 1])
# Initial output-layer coefficient, shape (1, 1).
initial_c = torch.tensor([1.0],requires_grad=True)[:,None]

# Activation functions: a single snapshot of exact_u at nu = 0, wrapped in P.
P_nu = 0.0
P_list = np.ones(1, dtype=object)
P_func_nu = partial(exact_u, P_nu)
P_list[0] = P(P_func_nu).to(device)

############################ TGPT-PINN Training ############################
# Train the 1-neuron TGPT-PINN independently for every nu and record the
# final loss, to locate the worst-approximated parameter.
nu_loss = []

tgpt_train_time_1 = time.perf_counter()
for nu in nu_training:

    u_exact = exact_u(nu, train_x)

    TGPT_NN = GPT(layers_gpt, nu, P_list[0:1],P_nu, initial_c,train_x, u_exact).to(device)

    tgpt_losses = gpt_train(TGPT_NN, layers_gpt, nu, P_list[0:1], train_x, epochs_tgpt, lr_tgpt, tol_tgpt)

    # gpt_train returns (final_loss, losses, epochs); keep the final loss.
    nu_loss.append(tgpt_losses[0])

tgpt_train_time_2 = time.perf_counter()
print("\nTGPT-PINN Training Completed")
print(f"TGPT Training Time: {(tgpt_train_time_2-tgpt_train_time_1)/3600} Hours")

# Report the parameter with the largest final loss.
largest_loss = max(nu_loss)
largest_loss_list=nu_loss.index(largest_loss)
print(f"Largest Loss (Using 1 Neurons): {largest_loss} at {nu_training[int(largest_loss_list)]}")
--------------------------------------------------------------------------------
/TGPT-Reaction/R_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | torch.set_default_dtype(torch.float)
4 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
5 |
class WaveAct(nn.Module):
    """Sinusoidal activation w1*sin(x) + w2*cos(x) with copied amplitudes."""
    def __init__(self, a1, a2):
        super(WaveAct, self).__init__()
        # Amplitudes cloned from the pre-trained network and moved to device.
        # Attribute names w1/w2 are part of the interface (frozen by callers).
        self.w1 = a1.clone().to(device)
        self.w2 = a2.clone().to(device)

    def forward(self, x):
        sin_part = torch.sin(x) * self.w1
        cos_part = torch.cos(x) * self.w2
        return sin_part + cos_part
15 |
class P(nn.Module):
    """Frozen pre-trained reaction PINN reused as a TGPT-PINN activation.

    Four layers of weights/biases are copied from a trained network and
    frozen, together with a frozen WaveAct sinusoidal activation.
    """

    def __init__(self, layers, w1, w2, w3, w4, b1, b2, b3, b4, a1, a2):
        super().__init__()
        self.layers = layers
        self.linears = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(self.layers, self.layers[1:])]
        )

        weights = (torch.Tensor(w1).clone(),
                   torch.Tensor(w2).clone(),
                   torch.Tensor(w3).clone(),
                   torch.Tensor(w4).clone().view(1, self.layers[3]))
        biases = (torch.Tensor(b1).clone(),
                  torch.Tensor(b2).clone(),
                  torch.Tensor(b3).clone(),
                  torch.Tensor(b4).clone().view(-1))

        # Copy the pre-trained parameters in and freeze them.
        for idx in range(4):
            self.linears[idx].weight.data = weights[idx]
            self.linears[idx].weight.requires_grad = False
            self.linears[idx].bias.data = biases[idx]
            self.linears[idx].bias.requires_grad = False

        # Frozen sinusoidal activation with the pre-trained amplitudes.
        self.activation = WaveAct(a1, a2)
        self.activation.w1.requires_grad = False
        self.activation.w2.requires_grad = False

    def forward(self, x):
        """TGPT-PINN Activation Function"""
        out = x
        for hidden in self.linears[:-1]:
            out = self.activation(hidden(out))
        return self.linears[-1](out)
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_TGPT_activation.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | torch.set_default_dtype(torch.float)
4 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
5 |
class WaveAct(nn.Module):
    """Full PINN activation: a fixed linear combination of sin and cos."""
    def __init__(self, a1, a2):
        super(WaveAct, self).__init__()
        # Coefficients are cloned and placed on the compute device.
        self.w1 = a1.clone().to(device)
        self.w2 = a2.clone().to(device)

    def forward(self, x):
        combined = self.w1 * torch.sin(x)
        combined = combined + self.w2 * torch.cos(x)
        return combined
15 |
class P(nn.Module):
    """Frozen pre-trained PINN reused as a transferable activation function.

    Weights/biases (w1..w4, b1..b4) come from a trained full PINN and are
    frozen, as are the WaveAct coefficients (a1, a2), so this module acts as
    a fixed nonlinear basis evaluated inside the TGPT-PINN.
    """
    def __init__(self, layers, w1, w2, w3, w4, b1, b2, b3, b4, a1, a2):
        super().__init__()
        self.layers = layers  # layer widths, e.g. [2, N, N, N, 1]
        self.linears = nn.ModuleList([nn.Linear(self.layers[i], self.layers[i+1]) for i in range(len(self.layers)-1)])

        # Load the pre-trained weights onto the compute device.
        self.linears[0].weight.data = torch.Tensor(w1).clone().to(device)
        self.linears[1].weight.data = torch.Tensor(w2).clone().to(device)
        self.linears[2].weight.data = torch.Tensor(w3).clone().to(device)
        # The output-layer weight is stored flat; reshape to (1, layers[3]).
        self.linears[3].weight.data = torch.Tensor(w4).clone().to(device).view(1,self.layers[3])

        # Freeze all weights: P is never trained further.
        self.linears[0].weight.requires_grad = False
        self.linears[1].weight.requires_grad = False
        self.linears[2].weight.requires_grad = False
        self.linears[3].weight.requires_grad = False

        self.linears[0].bias.data = torch.Tensor(b1).clone().to(device)
        self.linears[1].bias.data = torch.Tensor(b2).clone().to(device)
        self.linears[2].bias.data = torch.Tensor(b3).clone().to(device)
        self.linears[3].bias.data = torch.Tensor(b4).clone().to(device).view(-1)

        # Freeze all biases as well.
        self.linears[0].bias.requires_grad = False
        self.linears[1].bias.requires_grad = False
        self.linears[2].bias.requires_grad = False
        self.linears[3].bias.requires_grad = False

        # Frozen sin/cos activation with the pre-trained coefficients.
        self.activation = WaveAct(a1,a2)

        self.activation.w1.requires_grad = False
        self.activation.w2.requires_grad = False

    def forward(self, x):
        """TGPT-PINN Activation Function: hidden WaveAct layers, linear output."""
        a = x
        for i in range(0, len(self.layers)-2):
            z = self.linears[i](a)
            a = self.activation(z)
        a = self.linears[-1](a)
        return a
--------------------------------------------------------------------------------
/TGPT-Transport/T_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.autograd as autograd
4 | torch.set_default_dtype(torch.float)
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
7 |
class GPT(nn.Module):
    """TGPT-PINN for the transport equation u_t + nu * u_x = 0.

    `P` is a frozen pre-trained network used as the activation; only the
    small shift layers in `self.linears` are adapted for a new `nu`.
    """
    def __init__(self, layers, nu, P, f_hat, IC_u, resid_data, IC_data, BC_bottom, BC_top):
        super().__init__()
        self.layers = layers                                # layer widths
        self.nu = nu                                        # transport speed parameter
        self.loss_function = nn.MSELoss(reduction='mean')
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1], bias=False) for i in range(len(layers)-1)])
        self.activation = P                                 # frozen pre-trained PINN

        # Collocation / supervision data kept on the module for loss().
        self.resid_data = resid_data
        self.IC_data = IC_data
        self.BC_bottom = BC_bottom
        self.BC_top = BC_top
        self.IC_u = IC_u
        self.f_hat = f_hat                                  # zero target for the residual

        # First layer starts as the projection (x, t) -> x.
        # NOTE(review): linears[1] is initialized but never used in forward()
        # below — confirm whether it is intentional.
        self.linears[0].weight.data = torch.tensor([1.0, 0.0]).reshape(self.layers[1],self.layers[0]).float()
        self.linears[1].weight.data = torch.ones(self.layers[2], self.layers[1])


    def forward(self, x_data):
        # Apply the learned transform to (x, t), then wrap the result into
        # [-1, 1) to respect the periodic spatial domain.
        test_data = x_data.float()
        xshift_data = (self.linears[0](test_data) +1) % 2 -1
        # Evaluate the frozen PINN at (shifted x, t = 0); presumably the
        # shift encodes the characteristic x - nu*t of the transport solution.
        u_data = self.activation(torch.cat((xshift_data.reshape(test_data[:,0].shape[0],1),torch.zeros(test_data[:,0].shape[0],1).to(device)),1))
        return u_data

    def lossR(self):
        """Residual loss function"""
        x = self.resid_data.clone().requires_grad_()
        u = self.forward(x)
        # Gradient of u w.r.t. (x, t): column 0 is u_x, column 1 is u_t.
        u_xt = autograd.grad(u, x, torch.ones(x.shape[0], 1).to(device),create_graph=True)[0]
        u_x, u_t = u_xt[:,:1],u_xt[:,1:]
        f = u_t+self.nu*u_x
        # Gradient-dependent scaling: down-weights the residual where |u_x|
        # is large (steep fronts).
        d = 0.1*abs(u_x)+1
        return self.loss_function(f/d, self.f_hat)


    def lossIC(self):
        """First initial loss function"""
        x = self.IC_data.clone().requires_grad_()
        return self.loss_function(self.forward(x), self.IC_u)


    def lossBC(self):
        """Periodic boundary condition loss function"""
        # Match the solution on the bottom and top spatial boundaries.
        B1 = self.BC_bottom.clone().requires_grad_()
        B2 = self.BC_top.clone().requires_grad_()
        return self.loss_function(self.forward(B1), self.forward(B2))

    def loss(self):
        """Total Loss Function"""
        loss_R = self.lossR()
        loss_IC = self.lossIC()
        loss_BC = self.lossBC()
        return loss_R + loss_IC + loss_BC
63 |
--------------------------------------------------------------------------------
/TGPT-Reaction/R_PINN_wav.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import cos, sin
3 | import torch.autograd as autograd
4 | import torch.nn as nn
5 | torch.set_default_dtype(torch.float)
6 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
7 |
class WaveAct(nn.Module):
    """Trainable sinusoidal activation: w1*sin(x) + w2*cos(x)."""
    def __init__(self):
        super(WaveAct, self).__init__()
        # Both coefficients start at 1 and are learned with the network.
        self.w1 = nn.Parameter(torch.ones(1))
        self.w2 = nn.Parameter(torch.ones(1))

    def forward(self, x):
        parts = (self.w1 * torch.sin(x), self.w2 * torch.cos(x))
        return parts[0] + parts[1]
17 |
class NN(nn.Module):
    """Full PINN for the reaction equation u_t = nu * u * (1 - u)."""
    def __init__(self, layers, nu):
        super().__init__()
        self.layers = layers  # layer widths
        self.nu = nu          # reaction-rate parameter
        self.loss_function = nn.MSELoss(reduction='mean')
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])

        # Xavier-normal weights, zero biases.
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data)
            nn.init.zeros_(self.linears[i].bias.data)

        self.activation = WaveAct()  # trainable sin/cos activation

    def forward(self, x):
        a = x.float()
        # Hidden layers use WaveAct; the output layer is purely linear.
        for i in range(0, len(self.layers)-2):
            z = self.linears[i](a)
            a = self.activation(z)
        a = self.linears[-1](a)
        return a

    def lossR(self, xt_residual, f_hat):
        """Residual loss function"""
        g = xt_residual.clone().requires_grad_()

        u = self.forward(g)
        # Gradient of u w.r.t. (x, t); column 1 is the time derivative.
        u_xt = autograd.grad(u, g, torch.ones(g.shape[0], 1).to(device),create_graph=True)[0]

        u_t = u_xt[:,[1]]

        # Residual of u_t - nu*u*(1-u); f_hat is the zero target.
        f = u_t-self.nu*u*(1-u)


        return self.loss_function(f, f_hat)

    def lossIC(self, IC_xt, IC_u):
        """Initial condition loss function"""

        loss_IC = self.loss_function(self.forward(IC_xt), IC_u)
        return loss_IC

    def lossBC(self, BC1, BC2):
        """Periodic boundary condition loss function"""

        loss_BC = self.loss_function(self.forward(BC1), self.forward(BC2))
        return loss_BC

    def loss(self, xt_resid, IC_xt, IC_u, BC1, BC2, f_hat):
        """Total loss function"""
        loss_R = self.lossR(xt_resid, f_hat)
        loss_IC = self.lossIC(IC_xt, IC_u)
        loss_BC = self.lossBC(BC1, BC2)
        return loss_R + loss_IC +loss_BC
--------------------------------------------------------------------------------
/TGPT-Reaction/R_TGPT_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.set_default_dtype(torch.float)
3 |
def gpt_train(TGPT_PINN, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_gpt, lr_gpt, tol_gpt, testing=False):
    """Train a TGPT-PINN with Adam.

    Args:
        TGPT_PINN: module exposing .loss() and .parameters().
        rho: reaction parameter, used only for progress messages.
        f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2: kept for interface
            compatibility; the loss terms are stored on TGPT_PINN itself.
        epochs_gpt: maximum number of epochs.
        lr_gpt: Adam learning rate.
        tol_gpt: early-stopping tolerance (checked only when testing=False).
        testing: when True, train without the tolerance check and with a
            coarser loss-history stride.

    Returns:
        (final_loss, losses, epochs) — loss history sampled periodically.

    Fixes vs. original: the early-stop `break` path now always returns (the
    return statement previously only executed on the full-epoch path), the
    initial loss is computed once, and dead commented-out code is removed.
    """
    optimizer = torch.optim.Adam(TGPT_PINN.parameters(), lr=lr_gpt)

    loss_values = TGPT_PINN.loss()
    losses = [loss_values.item()]
    ep = [0]

    if not testing:
        for i in range(1, epochs_gpt+1):
            if loss_values < tol_gpt:
                # Stopping criterion met: record and stop early.
                losses.append(loss_values.item())
                ep.append(i)
                print(f'{round(rho,3)} stopped at epoch: {i} | gpt_loss: {loss_values.item()} (TGPT_PINN Stopping Criteria Met)')
                break
            optimizer.zero_grad()
            loss_values.backward()
            optimizer.step()
            loss_values = TGPT_PINN.loss()

            # Record the loss every 500 epochs and at the final epoch.
            if (i % 500 == 0) or (i == epochs_gpt):
                losses.append(loss_values.item())
                ep.append(i)
            if (i % 5000 == 0) or (i == epochs_gpt):
                print(f'{round(rho,3)} stopped at epoch: {i} | tgpt_loss: {loss_values.item()}')
            if i == epochs_gpt:
                print("TGPT-PINN Training Completed")
        # Single exit point: reached both after early stop and full training.
        return loss_values, losses, ep

    else:
        # Testing mode: no tolerance check, coarser history stride.
        for i in range(1, epochs_gpt+1):
            optimizer.zero_grad()
            loss_values.backward()
            optimizer.step()
            loss_values = TGPT_PINN.loss()
            if (i % 200 == 0) or (i == epochs_gpt):
                losses.append(loss_values.item())
                ep.append(i)
        loss_values = TGPT_PINN.loss()
        return loss_values, losses, ep
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_PINN_wav.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import cos, sin
3 | import torch.autograd as autograd
4 | import torch.nn as nn
5 | torch.set_default_dtype(torch.float)
6 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
7 |
class WaveAct(nn.Module):
    """Full PINN activation with learnable sin/cos coefficients."""
    def __init__(self):
        super(WaveAct, self).__init__()
        # Learnable coefficients, both initialized to one.
        self.w1 = nn.Parameter(torch.ones(1))
        self.w2 = nn.Parameter(torch.ones(1))

    def forward(self, x):
        sin_term = torch.sin(x) * self.w1
        cos_term = torch.cos(x) * self.w2
        return sin_term + cos_term
17 |
class NN(nn.Module):
    """Full PINN for the reaction-diffusion equation
    u_t = nu * u_xx + rho * u * (1 - u)."""
    def __init__(self, layers, nu, rho):
        super().__init__()
        self.layers = layers  # layer widths
        self.nu = nu          # diffusion coefficient
        self.rho = rho        # reaction-rate coefficient
        self.loss_function = nn.MSELoss(reduction='mean')
        self.linears = nn.ModuleList([nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])

        # Xavier-normal weights, zero biases.
        for i in range(len(layers)-1):
            nn.init.xavier_normal_(self.linears[i].weight.data)
            nn.init.zeros_(self.linears[i].bias.data)

        self.activation = WaveAct()  # trainable sin/cos activation

    def forward(self, x):
        a = x.float()
        # Hidden layers use WaveAct; the output layer is purely linear.
        for i in range(0, len(self.layers)-2):
            z = self.linears[i](a)
            a = self.activation(z)
        a = self.linears[-1](a)
        return a

    def lossR(self, xt_residual, f_hat):
        """Residual loss function"""
        g = xt_residual.clone().requires_grad_()

        u = self.forward(g)
        # First derivatives of u w.r.t. (x, t).
        u_xt = autograd.grad(u, g, torch.ones(g.shape[0], 1).to(device),retain_graph=True,create_graph=True)[0]
        u_x = u_xt[:,[0]]
        u_t = u_xt[:,[1]]

        # Second derivative u_xx via a second autograd pass on u_x.
        u_xxtt = autograd.grad(u_x, g, torch.ones(g.shape[0],1).to(device),retain_graph=True,create_graph=True)[0]
        u_xx = u_xxtt[:,[0]]
        # Residual of u_t - nu*u_xx - rho*u*(1-u); f_hat is the zero target.
        f = u_t-self.nu*u_xx-self.rho*u*(1-u)

        return self.loss_function(f, f_hat)

    def lossIC(self, IC_xt, IC_u):
        """Initial condition loss function"""

        loss_IC = self.loss_function(self.forward(IC_xt), IC_u)
        return loss_IC

    def lossBC(self, BC1, BC2):
        """Periodic boundary condition loss function"""

        loss_BC = self.loss_function(self.forward(BC1), self.forward(BC2))
        return loss_BC

    def loss(self, xt_resid, IC_xt, IC_u, BC1, BC2, f_hat):
        """Total loss function"""
        loss_R = self.lossR(xt_resid, f_hat)
        loss_IC = self.lossIC(IC_xt, IC_u)
        loss_BC = self.lossBC(BC1, BC2)
        return loss_R + loss_IC +loss_BC
--------------------------------------------------------------------------------
/TGPT-Reaction/R_Plotting.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import torch
4 | from torch import zeros
5 | import scienceplots
6 | plt.style.use(['science', 'notebook'])
7 |
def R_plot(xt, u, scale=150, cmap="rainbow", title=None,
           dpi=150, figsize=(10,8)):
    """Filled-contour plot of u(x, t) on the reaction domain (t in [0, 1]).

    Assumes `xt` and `u` were flattened from a square tensor-product grid,
    so the point count is a perfect square.
    """
    # Recover the square grid size from the flattened point count.
    shape = [int(np.sqrt(u.shape[0])), int(np.sqrt(u.shape[0]))]

    # Reshape back to 2-D grids and move to CPU for matplotlib.
    x = xt[:,0].reshape(shape=shape).transpose(1,0).cpu().detach()
    t = xt[:,1].reshape(shape=shape).transpose(1,0).cpu().detach()
    u = u.reshape(shape=shape).transpose(1,0).cpu().detach()

    fig, ax = plt.subplots(dpi=dpi, figsize=figsize)
    cp = ax.contourf(t, x, u, scale, cmap=cmap)  # `scale` = number of contour levels
    fig.colorbar(cp)

    ax.set_xlabel("$t$", fontsize=25)
    ax.set_ylabel("$x$", fontsize=25)

    # t spans [0, 1]; x spans one spatial period [0, 2*pi].
    ax.set_xticks([ 0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.set_yticks([ 0, round(np.pi/2,3), round(np.pi,3), round(3*np.pi/2,3), round(2*np.pi,3)])

    ax.tick_params(axis='both', which='major', labelsize=22.5)
    ax.tick_params(axis='both', which='minor', labelsize=22.5)

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
34 |
def R_plot_plus(xt, u, scale=150, cmap="rainbow", title=None,
           dpi=150, figsize=(10,8)):
    """Filled-contour plot of u(x, t) for the long-time domain t in [0, 10]."""
    n = int(np.sqrt(u.shape[0]))
    grid = [n, n]

    # Reshape the flattened grid data back to 2-D and move it to the CPU.
    x_grid = xt[:, 0].reshape(grid).transpose(1, 0).cpu().detach()
    t_grid = xt[:, 1].reshape(grid).transpose(1, 0).cpu().detach()
    u_grid = u.reshape(grid).transpose(1, 0).cpu().detach()

    fig, ax = plt.subplots(dpi=dpi, figsize=figsize)
    contours = ax.contourf(t_grid, x_grid, u_grid, scale, cmap=cmap)
    fig.colorbar(contours)

    ax.set_xlabel("$t$", fontsize=25)
    ax.set_ylabel("$x$", fontsize=25)

    # t spans [0, 10]; x spans one spatial period [0, 2*pi].
    ax.set_xticks([0, 2.0, 4.0, 6.0, 8.0, 10.0])
    ax.set_yticks([0, round(np.pi/2, 3), round(np.pi, 3), round(3*np.pi/2, 3), round(2*np.pi, 3)])

    for which in ('major', 'minor'):
        ax.tick_params(axis='both', which=which, labelsize=22.5)

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
61 |
def loss_plot(epochs, losses, title=None, dpi=150, figsize=(10,8)):
    """Plot the training-loss history on a logarithmic y-axis."""
    fig = plt.figure(dpi=dpi, figsize=figsize)
    ax = fig.gca()
    ax.plot(epochs, losses, c="k", linewidth=3)

    ax.set_xlabel("Epoch", fontsize=20)
    ax.set_ylabel("Loss", fontsize=20)

    ax.grid(True)
    ax.set_xlim(0, max(epochs))
    ax.set_yscale('log')

    if title is not None:
        ax.set_title(title, fontsize=30)

    plt.show()
78 |
--------------------------------------------------------------------------------
/TGPT-Reaction/R_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch import sin, pi
4 | import torch.autograd as autograd
5 | torch.set_default_dtype(torch.float)
6 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
7 |
8 |
class GPT(nn.Module):
    """TGPT-PINN for the reaction equation u_t = nu * u * (1 - u).

    Each of the first layers[1] linear maps is a learnable 2x2 affine
    transform of (x, t) feeding one frozen pre-trained network from `P`;
    the final bias-free layer combines the neuron outputs.
    """
    def __init__(self, layers, nu, P, c_initial, f_hat, IC_u, resid_data, IC_data, BC_bottom, BC_top):
        super().__init__()
        self.layers = layers
        self.num_neurons = self.layers[1]  # number of frozen basis networks
        self.nu = nu                       # reaction-rate parameter
        self.loss_function = nn.MSELoss(reduction='mean')
        # One (x, t)-transform per neuron plus a bias-free combination layer.
        self.linears = nn.ModuleList([nn.Linear(layers[0], layers[0]) for i in range(self.layers[1])]+[nn.Linear(layers[1], layers[2],bias=False)])
        self.activation = P                # list of frozen pre-trained PINNs

        # Collocation / supervision data kept on the module for loss().
        self.resid_data = resid_data
        self.IC_data = IC_data
        self.BC_bottom = BC_bottom
        self.BC_top = BC_top
        self.IC_u = IC_u
        self.f_hat = f_hat                 # zero target for the residual


        # Each transform starts as the identity with zero shift.
        for i in range(self.layers[1]):
            self.linears[i].weight.data = torch.eye(self.layers[0])
            self.linears[i].bias.data=torch.zeros(self.layers[0])

        # Combination coefficients start from the supplied initial guess.
        self.linears[-1].weight.data = c_initial


    def forward(self, x_data):
        test_data = x_data.float()
        u_data = torch.Tensor().to(device)
        for i in range(0, self.layers[-2]):
            # Per-neuron affine transform of (x, t); x is wrapped mod 2*pi
            # to respect the periodic spatial domain.
            shift_data = self.linears[i](test_data)
            xshift_data = shift_data[:,:1]%(2*pi)
            tshift_data = shift_data[:,1:]
            u_data = torch.cat((u_data, self.activation[i](torch.cat((xshift_data,tshift_data),1))), 1)
        # Linear combination of the neuron outputs.
        final_output = self.linears[-1](u_data)
        return final_output

    def lossR(self):
        """Residual loss function"""
        x = self.resid_data.clone().requires_grad_()
        u = self.forward(x)
        # Gradient of u w.r.t. (x, t); column 1 is the time derivative.
        u_xt = autograd.grad(u, x, torch.ones(x.shape[0], 1).to(device),create_graph=True)[0]
        u_x, u_t = u_xt[:,:1],u_xt[:,1:]
        # Residual of u_t - nu*u*(1-u); f_hat is the zero target.
        f = u_t-self.nu*u*(1-u)

        return self.loss_function(f, self.f_hat)

    def lossIC(self):
        """Initial loss function"""
        x = self.IC_data.clone().requires_grad_()
        return self.loss_function(self.forward(x), self.IC_u)


    def lossBC(self):
        """Periodic boundary condition loss function"""
        B1 = self.BC_bottom.clone().requires_grad_()
        B2 = self.BC_top.clone().requires_grad_()
        return self.loss_function(self.forward(B1), self.forward(B2))

    def loss(self):
        """Total Loss Function"""
        loss_R = self.lossR()
        loss_IC = self.lossIC()
        loss_BC = self.lossBC()
        return loss_R + loss_IC + loss_BC
--------------------------------------------------------------------------------
/TGPT-Reaction/R_data.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.autograd as autograd
4 | from torch import linspace, meshgrid, hstack, zeros, sin, pi, ones, exp
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
7 |
def initial_u(x, t=0):
    """Gaussian bump centered at x = pi with std pi/4 (`t` is unused)."""
    sigma = pi / 4
    return exp(-((x - pi) ** 2) / (2 * sigma ** 2))
11 |
def exact_u(xt, nu):
    """Exact logistic-growth solution from the Gaussian initial profile."""
    u0 = initial_u(xt[:, :1])
    growth = u0 * exp(nu * xt[:, 1:])
    return growth / (growth + 1 - u0)
15 |
def create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple):
    """Sample IC_simple initial-condition points (t = Ti) without replacement."""
    grid_x = linspace(Xi, Xf, IC_pts)
    grid_t = linspace(Ti, Ti, IC_pts)
    X_IC, T_IC = meshgrid(grid_x, grid_t, indexing='ij')

    # Random subsample of the spatial grid (no replacement).
    chosen = np.random.choice(IC_pts, IC_simple, replace=False)
    IC_x = X_IC[chosen, 0][:, None]
    IC_t = zeros(IC_x.shape[0], 1)
    return (hstack((IC_x, IC_t)), initial_u(IC_x))
28 |
def create_BC_data(Xi, Xf, Ti, Tf, BC_pts):
    """Return the (x, t) points on the two spatial boundaries x = Xi and x = Xf."""
    xs = linspace(Xi, Xf, BC_pts)
    ts = linspace(Ti, Tf, BC_pts)
    grid_x, grid_t = meshgrid(xs, ts, indexing='ij')

    # First grid row: x = Xi for every t; last grid row: x = Xf.
    bottom = hstack((grid_x[0, :][:, None], grid_t[0, :][:, None]))
    top = hstack((grid_x[-1, :][:, None], grid_t[-1, :][:, None]))
    return bottom, top
43 |
def create_residual_data(Xi, Xf, Ti, Tf, N_train, N_test, N_simple):
    """Build randomly subsampled residual points and a full test grid.

    Returns (xt_resid_rd, f_hat_train, xt_test): N_simple collocation points
    drawn without replacement from an N_train x N_train grid, the matching
    zero residual target, and an N_test x N_test evaluation grid.
    """
    # ---- training grid -------------------------------------------------
    xs = linspace(Xi, Xf, N_train)
    ts = linspace(Ti, Tf, N_train)
    grid_x, grid_t = meshgrid((xs, ts), indexing='ij')
    flat_x = grid_x.transpose(1, 0).flatten()[:, None]
    flat_t = grid_t.transpose(1, 0).flatten()[:, None]

    # Random subsample of N_simple collocation points (no replacement).
    chosen = np.random.choice(N_train**2, N_simple, replace=False)
    xt_resid_rd = hstack((flat_x[:, 0][chosen, None], flat_t[:, 0][chosen, None]))
    f_hat_train = zeros((xt_resid_rd.shape[0], 1))

    # ---- test grid -----------------------------------------------------
    xs_test = linspace(Xi, Xf, N_test)
    ts_test = linspace(Ti, Tf, N_test)
    gx_test, gt_test = meshgrid((xs_test, ts_test), indexing='ij')
    xt_test = hstack((gx_test.transpose(1, 0).flatten()[:, None],
                      gt_test.transpose(1, 0).flatten()[:, None]))
    return (xt_resid_rd, f_hat_train, xt_test)
73 |
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_TGPT_PINN.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch import sin, pi
4 | import torch.autograd as autograd
5 | torch.set_default_dtype(torch.float)
6 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
7 |
8 |
class GPT(nn.Module):
    """TGPT-PINN for the reaction-diffusion equation
    u_t = nu * u_xx + rho * u * (1 - u).

    Each of the first layers[1] linear maps is a learnable 2x2 affine
    transform of (x, t) feeding one frozen pre-trained network from `P`;
    the final bias-free layer combines the neuron outputs.
    """
    def __init__(self, layers, nu,rho, P, c_initial, f_hat, IC_u, resid_data, IC_data, BC_bottom, BC_top):
        super().__init__()
        self.layers = layers
        self.num_neurons = self.layers[1]  # number of frozen basis networks
        self.nu = nu                       # diffusion coefficient
        self.rho = rho                     # reaction-rate coefficient
        self.loss_function = nn.MSELoss(reduction='mean')
        # One (x, t)-transform per neuron plus a bias-free combination layer.
        self.linears = nn.ModuleList([nn.Linear(layers[0], layers[0]) for i in range(self.layers[1])]+[nn.Linear(layers[1], layers[2],bias=False)])
        self.activation = P                # list of frozen pre-trained PINNs

        # Collocation / supervision data kept on the module for loss().
        self.resid_data = resid_data
        self.IC_data = IC_data
        self.BC_bottom = BC_bottom
        self.BC_top = BC_top
        self.IC_u = IC_u
        self.f_hat = f_hat                 # zero target for the residual


        # Each transform starts as the identity with zero shift.
        for i in range(self.layers[1]):
            self.linears[i].weight.data = torch.eye(self.layers[0])
            self.linears[i].bias.data=torch.zeros(self.layers[0])

        # Combination coefficients start from the supplied initial guess.
        self.linears[-1].weight.data = c_initial


    def forward(self, x_data):
        test_data = x_data.float()
        u_data = torch.Tensor().to(device)
        for i in range(0, self.layers[-2]):
            # Per-neuron affine transform of (x, t); x is wrapped mod 2*pi
            # to respect the periodic spatial domain.
            shift_data = self.linears[i](test_data)
            xshift_data = shift_data[:,:1]%(2*pi)
            tshift_data = shift_data[:,1:]
            u_data = torch.cat((u_data, self.activation[i](torch.cat((xshift_data,tshift_data),1))), 1)
        # Linear combination of the neuron outputs.
        final_output = self.linears[-1](u_data)
        return final_output

    def lossR(self):
        """Residual loss function"""
        x = self.resid_data.clone().requires_grad_()
        u = self.forward(x)
        # First derivatives of u w.r.t. (x, t).
        u_xt = autograd.grad(u, x, torch.ones(x.shape[0], 1).to(device),create_graph=True)[0]
        u_x, u_t = u_xt[:,[0]],u_xt[:,[1]]
        # Second derivative u_xx via a second autograd pass on u_x.
        u_xxtt = autograd.grad(u_x, x, torch.ones(x.shape[0],1).to(device),create_graph=True)[0]
        u_xx = u_xxtt[:,[0]]
        # Residual of u_t - nu*u_xx - rho*u*(1-u); f_hat is the zero target.
        f = u_t-self.nu*u_xx-self.rho*u*(1-u)

        return self.loss_function(f, self.f_hat)

    def lossIC(self):
        """First initial loss function"""
        x = self.IC_data.clone().requires_grad_()
        return self.loss_function(self.forward(x), self.IC_u)


    def lossBC(self):
        """Both boundary condition loss function"""
        B1 = self.BC_bottom.clone().requires_grad_()
        B2 = self.BC_top.clone().requires_grad_()
        return self.loss_function(self.forward(B1), self.forward(B2))

    def loss(self):
        """Total Loss Function"""
        loss_R = self.lossR()
        loss_IC = self.lossIC()
        loss_BC = self.lossBC()
        return loss_R + loss_IC + loss_BC
--------------------------------------------------------------------------------
/TGPT-function2d/main_TGPT_fun2d.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | import random
6 | #import os
7 | import time
8 | from functools import partial
9 |
10 | # GPT-NN
11 | from fun2d_TGPT_activation import P
12 | from fun2d_TGPT_PINN import GPT
13 | from fun2d_TGPT_train import gpt_train
14 |
15 | torch.set_default_dtype(torch.float)
16 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
17 | print(f'Current Device: {device}')
18 | if torch.cuda.is_available():
19 | print(f'Current Device Name: {torch.cuda.get_device_name()}')
20 |
def exact_u(nu, xy):
    """Exact field 1/||(x, y) - nu|| for a point source at nu = (nu1, nu2)."""
    dx = xy[:, 0] - nu[0]
    dy = xy[:, 1] - nu[1]
    return 1.0 / torch.sqrt(dx ** 2 + dy ** 2)
29 |
def create_xy_data(Xi, Xf, Yi, Yf, Nx, Ny):
    """Build a flattened Nx-by-Ny tensor-product grid on [Xi,Xf] x [Yi,Yf].

    Returns (X, Y, u_xy) where X and Y are column vectors and u_xy stacks
    them as (N, 2) coordinate pairs.
    """
    xs = torch.linspace(Xi, Xf, Nx)
    ys = torch.linspace(Yi, Yf, Ny)

    # Note the (yy, xx) unpacking order, preserved from the original layout.
    yy, xx = torch.meshgrid((xs, ys), indexing='ij')
    X = xx.transpose(1, 0).flatten()[:, None]
    Y = yy.transpose(1, 0).flatten()[:, None]

    return X, Y, torch.hstack((X, Y))
41 |
# Domain: unit square discretized with a 101 x 101 grid.
Xi, Xf = 0.0, 1.0
Yi, Yf = 0.0, 1.0
Nx_train, Ny_train = 101, 101

uxy_data = create_xy_data(Xi, Xf, Yi, Yf, Nx_train, Ny_train)
train_x = uxy_data[0].to(device)
train_y = uxy_data[1].to(device)
train_xy = uxy_data[2].to(device)

# Training Parameter Set: 21 x 21 grid of source locations (nu1, nu2).
nu_training = np.linspace(-1.0,-0.01, 21)
nuxy_training=[]
for i in range(nu_training.shape[0]):
    for j in range(nu_training.shape[0]):
        nuxy_training.append([nu_training[i],nu_training[j]])
nuxy_training = np.array(nuxy_training)

# Activation functions: one frozen exact-solution network at nu = (-1, -1).
P_nu = [-1, -1]
P_list = np.ones(1, dtype=object)
P_func_nu = partial(exact_u, P_nu)
P_list[0] = P(P_func_nu).to(device)

# TGPT-PINN Setting
lr_tgpt = 0.025
epochs_tgpt = 2000
tol_tgpt = 1e-11

# Single-neuron network; initial combination coefficient c and shift s.
layers_gpt = np.array([1, 1, 1, 1])
initial_c = torch.tensor([1.0],requires_grad=True)[:,None]
initial_s = torch.tensor([0.0],requires_grad=True)[:,None]
############################ TGPT-PINN Training ############################
nu_loss = []

tgpt_train_time_1 = time.perf_counter()

# Train one TGPT-PINN per parameter pair and record its final loss.
for nu in nuxy_training:

    u_exact = exact_u(nu, train_xy).reshape(train_xy.shape[0],1)

    TGPT_NN = GPT(layers_gpt, nu, P_list[0:1], initial_c, initial_s,train_xy, u_exact).to(device)

    tgpt_losses = gpt_train(TGPT_NN, layers_gpt, nu, P_list[0:1], train_xy, u_exact, epochs_tgpt, lr_tgpt, tol_tgpt)

    nu_loss.append(tgpt_losses[0])

tgpt_train_time_2 = time.perf_counter()
print("\nTGPT-PINN Training Completed")
# NOTE(review): `i` below is left over from the parameter-grid loop above
# (always 20 here), not a neuron count — confirm the intended message.
print(f"TGPT Training Time ({i+1} Neurons): {(tgpt_train_time_2-tgpt_train_time_1)/3600} Hours")

# Report the worst-approximated parameter pair.
largest_loss = max(nu_loss)
largest_loss_list=nu_loss.index(largest_loss)
print(f"Largest Loss (Using 1 Neurons): {largest_loss} at {nuxy_training[int(largest_loss_list)]}")
--------------------------------------------------------------------------------
/TGPT-Transport/T_data.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.autograd as autograd
4 | from torch import linspace, meshgrid, hstack, zeros, sin, pi
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 | from choice_widthindex import random_point_in_diagonal_band, random_point_in_initial
7 |
def exact_u(xt, nu):
    """Exact periodic square-wave transport solution: 0 where the wrapped
    characteristic x - nu*t lies within 0.5 of the bump center, else 1."""
    phase = (xt[:, 0] - nu * xt[:, 1] + 1) % 2 - 1
    inside = abs(phase) < 0.5
    return torch.where(inside, torch.tensor(0.0), torch.tensor(1.0)).to(torch.float32)
12 |
def initial_u(x, t=0):
    """Square-wave initial profile: 0 on |x| < 0.5, 1 elsewhere (`t` unused)."""
    near_center = abs(x) < 0.5
    return torch.where(near_center, torch.tensor(0.0), torch.tensor(1.0)).to(torch.float32)
16 |
17 |
def create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple):
    """Sample IC_simple distinct initial-condition points (t = Ti).

    Indices are drawn with the project helper `random_point_in_initial`.

    Fix vs. original: a duplicate index was retried only once and, when the
    retry also collided, the second draw was discarded and id_ic[i] silently
    stayed 0 (duplicating index 0).  Now we redraw until the index is unique.
    """
    x_IC = linspace(Xi, Xf, IC_pts)
    t_IC = linspace(Ti, Ti, IC_pts)
    X_IC, T_IC = meshgrid(x_IC, t_IC, indexing='ij')
    id_ic = np.zeros(IC_simple, dtype=int)
    for i in range(0, IC_simple):
        new_id = random_point_in_initial(IC_pts, 2000)
        # Redraw until the index has not been used yet.
        while new_id in id_ic[0:i]:
            new_id = random_point_in_initial(IC_pts, 2000)
        id_ic[i] = new_id
    IC_x = X_IC[id_ic,0][:,None]
    IC_t = zeros(IC_x.shape[0], 1)
    IC_u = initial_u(IC_x)
    IC = hstack((IC_x, IC_t))
    return (IC, IC_u)
35 |
def create_BC_data(Xi, Xf, Ti, Tf, BC_pts):
    """Return (x, t) samples along the boundaries x = Xi (bottom) and x = Xf (top)."""
    space = linspace(Xi, Xf, BC_pts)
    times = linspace(Ti, Tf, BC_pts)
    gx, gt = meshgrid(space, times, indexing='ij')

    # Row 0 of the grid holds x = Xi; the last row holds x = Xf.
    BC_bottom = hstack((gx[0, :][:, None], gt[0, :][:, None]))
    BC_top = hstack((gx[-1, :][:, None], gt[-1, :][:, None]))
    return BC_bottom, BC_top
50 |
def create_residual_data(nu, Xi, Xf, Ti, Tf, N_train, N_test, N_simple):
    """Build residual collocation points concentrated near the solution
    features, plus a full test grid.

    Returns (xt_resid_rd, f_hat_train, xt_test, f_hat_test).
    """
    ##########################################################
    x_resid = linspace(Xi, Xf, N_train)
    t_resid = linspace(Ti, Tf, N_train)

    XX_resid, TT_resid = meshgrid((x_resid, t_resid), indexing='ij')

    X_resid = XX_resid.transpose(1,0).flatten()[:,None]
    T_resid = TT_resid.transpose(1,0).flatten()[:,None]
    ##########################################################
    # Split the budget: sqrt(N_simple) points from the initial region, the
    # rest from the diagonal band (presumably around the characteristic
    # x = nu*t — see choice_widthindex.py for the helpers' exact semantics).
    id_f =np.zeros(N_simple,dtype=int)
    for i in range(0, int(np.sqrt(N_simple))):
        id_f[i] = random_point_in_initial(N_train, 40)
    for i in range(int(np.sqrt(N_simple)), N_simple):
        id_f[i] = random_point_in_diagonal_band(nu, N_train, 40)
    x_int = X_resid[:, 0][id_f, None]
    t_int = T_resid[:, 0][id_f, None]
    xt_resid_rd = hstack((x_int, t_int))
    # Zero target for the PDE residual at the sampled points.
    f_hat_train = zeros((xt_resid_rd.shape[0], 1))
    ##########################################################
    # Full N_test x N_test evaluation grid.
    x_test = linspace(Xi, Xf, N_test)
    t_test = linspace(Ti, Tf, N_test)

    XX_test, TT_test = meshgrid((x_test, t_test), indexing='ij')

    X_test = XX_test.transpose(1,0).flatten()[:,None]
    T_test = TT_test.transpose(1,0).flatten()[:,None]

    xt_test = hstack((X_test, T_test))
    f_hat_test = zeros((xt_test.shape[0], 1))
    ##########################################################
    return (xt_resid_rd, f_hat_train, xt_test, f_hat_test)
83 |
--------------------------------------------------------------------------------
/TGPT-function2/main_TGPT-PINN_kink.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | #import os
6 | import time
7 | from functools import partial
8 |
9 | # GPT-NN
10 | from kink_TGPT_activation import P
11 | from kink_TGPT_PINN import GPT
12 | from kink_TGPT_train import gpt_train
13 |
14 | torch.set_default_dtype(torch.float)
15 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
16 | print(f'Current Device: {device}')
17 | if torch.cuda.is_available():
18 | print(f'Current Device Name: {torch.cuda.get_device_name()}')
19 |
def exact_u(nu, x):
    """Compactly supported bump: exp(1/(h^2 - 1)) on -1 < h < -1/2 and 0
    elsewhere, where h = x/(0.4 + nu) - 1; NaNs from the singular exponent
    (at h = +-1) are replaced by 0."""
    h = (x / (0.4 + nu) - 1).to(device)
    bump = torch.exp(1.0 / (h ** 2 - 1)).to(device)
    ones_col = torch.ones(x.shape[0], 1).to(device)
    zeros_col = torch.zeros([x.shape[0], 1]).to(device)
    left_mask = torch.where(h > -1, ones_col, zeros_col)
    right_mask = torch.where(h < -1/2, ones_col, zeros_col)
    u = left_mask * right_mask * bump
    return torch.where(torch.isnan(u), torch.full_like(u, 0), u)
27 |
# Domain
Xi, Xf = -1.0, 1.0
# NOTE(review): N_test is defined but only N_train is used in this script.
N_train, N_test = 100, 100
train_x = torch.linspace(Xi, Xf, N_train)[:,None]

# Training Parameter Set
number_of_parameters = 11
nu_training = np.linspace(0, 1, number_of_parameters)
# Every 5th training parameter, presumably kept for plotting -- unused here.
nu_plot = nu_training[0:nu_training.shape[0]:5]

number_of_neurons = 10
nu_neurons = torch.zeros(number_of_neurons).to(device) # Neuron parameters
nu_neurons[0] = 1.0

# largest_loss[i]: worst final loss over nu_training with i+1 neurons;
# largest_loss_list[i]: index into nu_training of that worst case;
# P_list[i]: the i-th activation-function network (object array).
largest_loss = np.zeros(number_of_neurons)
largest_loss_list = np.ones(number_of_neurons)
P_list = np.ones(number_of_neurons, dtype=object)
print(f"Expected Final GPT-PINN Depth: {[1,number_of_neurons,1]}\n")


# TGPT-PINN Setting
lr_tgpt = 0.01
epochs_tgpt = 5000
x_tol_tgpt = 1e-14
u_tol_tgpt = 1e-6
53 |
total_train_time_1 = time.perf_counter()
# ############################### Training Loop ################################
# ##############################################################################
# Greedy neuron selection: train the TGPT-PINN over all of nu_training with
# the current i+1 activation functions; the worst-approximated nu becomes
# the next neuron's parameter.
for i in range(0, number_of_neurons):

    # Add new activation function built from the current neuron parameter
    P_nu = nu_neurons[i]
    P_func_nu = partial(exact_u, P_nu)
    P_list[i] = P(P_func_nu).to(device)

    ############################ TGPT-PINN Training ############################
    # Finding The Next Neuron
    # (the old, never-used nu_x_loss list has been removed)
    nu_u_loss = []

    layers_gpt = np.array([1, i+1, 1])
    # Uniform initial combination coefficients over the i+1 neurons
    c_initial = torch.full((1,i+1), 1/(i+1))
    for nu in nu_training:
        u_exact = exact_u(nu, train_x)

        TGPT_NN = GPT(layers_gpt, nu, P_list[0:i+1],nu_neurons[0:i+1], c_initial, train_x, u_exact).to(device)

        tgpt_losses = gpt_train(TGPT_NN, layers_gpt, nu, epochs_tgpt, lr_tgpt,x_tol_tgpt,u_tol_tgpt)

        # index 1 is presumably the final u-loss -- confirm in gpt_train
        nu_u_loss.append(tgpt_losses[1])

    print("\nTGPT-PINN Training Completed")

    largest_loss[i] = max(nu_u_loss)
    largest_loss_list[i]=nu_u_loss.index(largest_loss[i])
    # Fixed: the message previously hardcoded "1 Neurons" for every i.
    print(f"Largest Loss (Using {i+1} Neurons): {largest_loss[i]} at {nu_training[int(largest_loss_list[i])]}")

    if (i+1 < number_of_neurons):
        nu_neurons[i+1] = nu_training[int(largest_loss_list[i])]
        print(f"Next parameter Case: {nu_neurons[i+1]}")

total_train_time_2 = time.perf_counter()
print(f"Find parameters:{nu_neurons}")
92 |
93 |
94 |
95 |
96 |
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/RD_data.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.autograd as autograd
4 | from torch import linspace, meshgrid, hstack, zeros, sin, pi, ones, exp
5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6 |
7 |
def initial_u(x, t=0):
    """Gaussian initial profile centered at x = pi (the t argument is unused)."""
    variance = 2*(pi/4)**2
    return exp(-(x - pi)**2 / variance)
11 |
def exact_u(Xi,Xf,Ti,Tf,N_test,nu,rho):
    """Pseudo-spectral reference solution of u_t = nu*u_xx + rho*u*(1-u).

    Operator splitting per step: the reaction part uses its closed-form
    logistic update, the diffusion part is integrated exactly in Fourier
    space. Time is refined 100x internally and every 100th snapshot is
    returned as an (N_test, N_test) array with the time index first.
    Assumes N_test is even (the wavenumber layout below requires it) and
    the domain is 2*pi-periodic -- confirm with callers.
    """
    x = np.linspace(Xi, Xf, N_test, dtype=np.float32)
    Nt_test=100*N_test
    # NOTE(review): the linspace spacing is (Tf-Ti)/(Nt_test-1) while dt is
    # (Tf-Ti)/Nt_test; kept as-is to reproduce the original output exactly.
    dt = (Tf-Ti)/Nt_test
    u= np.zeros((N_test,Nt_test),dtype=np.float32)
    # Gaussian initial condition centered at x = pi (matches initial_u)
    u[:,0] = np.exp(-(x-np.pi)**2/(2*(np.pi/4)**2))
    # i*k wavenumbers in numpy's FFT ordering for an even N_test
    IKX_pos = 1j * np.arange(0, N_test/2+1, 1)
    IKX_neg = 1j * np.arange(-N_test/2+1, 0, 1)
    IKX = np.concatenate((IKX_pos, IKX_neg))
    IKX2 = (IKX * IKX).astype(np.complex64)
    for n in range(Nt_test-1):
        # exact logistic (reaction) half-step
        u_half = (u[:,n]*np.exp(rho*dt))/(u[:,n]*np.exp(rho*dt)+1-u[:,n])
        # exact diffusion step in Fourier space
        u[:,n+1] = np.fft.ifft(np.fft.fft(u_half)*np.exp((dt)*nu*IKX2)).real

    # keep every 100th time slice; transpose so rows index time
    u_output = u[:, ::100].T
    return u_output
30 |
def create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple):
    """Sample IC_simple initial-condition points and their values.

    Returns (IC, IC_u): IC is (IC_simple, 2) with columns (x, t=0) and
    IC_u = initial_u at those x. Tf is unused; t is hard-coded to 0,
    which matches Ti = 0 in the drivers -- confirm if Ti ever changes.
    """
    ##########################################################
    x_IC = linspace(Xi, Xf, IC_pts)
    # Random subsample WITHOUT replacement from the unseeded global RNG
    # (same draw as before: choice over IC_pts indices).
    id_ic = np.random.choice(IC_pts, IC_simple, replace=False)
    # x_IC[id_ic] equals column 0 of the former IC_pts x IC_pts meshgrid,
    # so the O(IC_pts^2) grid allocation is no longer needed.
    IC_x = x_IC[id_ic][:,None]
    IC_t = zeros(IC_x.shape[0], 1)
    IC_u = initial_u(IC_x)
    IC = hstack((IC_x, IC_t))
    return (IC, IC_u)
43 |
def create_BC_data(Xi, Xf, Ti, Tf, BC_pts):
    """Boundary collocation points along both spatial edges.

    Returns (BC_bottom, BC_top): each is (BC_pts, 2) with columns (x, t),
    where x = Xi on the bottom edge and x = Xf on the top edge, and t
    sweeps linspace(Ti, Tf, BC_pts).
    """
    ##########################################################
    t_BC = linspace(Ti, Tf, BC_pts)[:,None]
    # The former BC_pts x BC_pts meshgrid was reduced to its first/last
    # row anyway; build those rows directly in O(BC_pts). linspace hits
    # its endpoints exactly, so the values match the old grid rows.
    BC_bottom = hstack((torch.full((BC_pts, 1), Xi), t_BC))
    BC_top = hstack((torch.full((BC_pts, 1), Xf), t_BC))
    return BC_bottom, BC_top
58 |
def create_residual_data(Xi, Xf, Ti, Tf, N_train, N_test, N_simple):
    """Build residual (collocation) and test point sets.

    Returns:
        xt_resid_rd : (N_simple, 2) random subsample of the flattened
                      N_train x N_train training grid, columns (x, t).
        f_hat_train : (N_simple, 1) zero right-hand side for the residual loss.
        xt_test     : (N_test**2, 2) full uniform test grid, t-major order.
    """
    ##########################################################
    x_resid = linspace(Xi, Xf, N_train)
    t_resid = linspace(Ti, Tf, N_train)

    XX_resid, TT_resid = meshgrid((x_resid, t_resid), indexing='ij')

    # transpose before flattening so points are ordered t-major,
    # matching the rest of the code base
    X_resid = XX_resid.transpose(1,0).flatten()[:,None]
    T_resid = TT_resid.transpose(1,0).flatten()[:,None]

    # random interior subsample WITHOUT replacement (unseeded global RNG);
    # the dead full-grid hstack the old code built here has been removed
    id_f =np.random.choice(N_train**2, N_simple, replace=False)
    x_int = X_resid[:, 0][id_f, None]
    t_int = T_resid[:, 0][id_f, None]
    xt_resid_rd = hstack((x_int, t_int))
    f_hat_train = zeros((xt_resid_rd.shape[0], 1))
    ##########################################################
    x_test = linspace(Xi, Xf, N_test)
    t_test = linspace(Ti, Tf, N_test)

    XX_test, TT_test = meshgrid((x_test, t_test), indexing='ij')

    X_test = XX_test.transpose(1,0).flatten()[:,None]
    T_test = TT_test.transpose(1,0).flatten()[:,None]

    xt_test = hstack((X_test, T_test))
    ##########################################################
    return (xt_resid_rd, f_hat_train, xt_test)
88 |
--------------------------------------------------------------------------------
/TGPT-Transport/main_TGPT_Trans.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | #from torch import linspace
6 | import os
7 | import time
8 |
9 | from T_data import create_residual_data, create_IC_data, create_BC_data, exact_u
10 | from T_Plotting import Trans_plot, loss_plot
11 |
12 | # Full PINN
13 | from T_PINN import NN
14 | from T_PINN_train import pinn_train
15 |
16 | # Transport equation TGPT-PINN
17 | from T_TGPT_activation import P
18 | from T_TGPT_PINN import GPT
19 | from T_TGPT_train import gpt_train
20 |
torch.set_default_dtype(torch.float)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Current Device: {device}")
if torch.cuda.is_available():
    print(f"Current Device Name: {torch.cuda.get_device_name()}")


# Transport speed for the single full PINN; its trained network is baked
# into the TGPT-PINN's activation function further down.
nu_pinn_train = 0.0

# Full-PINN hyperparameters
lr_pinn = 0.001
epochs_pinn = 400000
layers_pinn = np.array([2, 20,20,20,1])
tol = 1e-6

# Domain and Data
Xi, Xf = -1.0, 1.0
Ti, Tf = 0.0, 2.0
N_train, N_test, N_simple = 201, 100, 10000
IC_pts, IC_simple = 10001, 1000
BC_pts = 1001

# Residual/test collocation sets. NOTE(review): the first argument is nu,
# so the grid presumably depends on the transport speed -- confirm in
# T_data.create_residual_data.
residual_data = create_residual_data(nu_pinn_train, Xi, Xf, Ti, Tf, N_train, N_test, N_simple)
xt_resid = residual_data[0].to(device)
f_hat = residual_data[1].to(device)
xt_test = residual_data[2].to(device)
#plt.scatter(xt_resid[:,1].detach().cpu(),xt_resid[:,0].detach().cpu(),s=1)
#plt.scatter(xt_resid[0:int(np.sqrt(N_simple)),1].detach().cpu(),xt_resid[0:int(np.sqrt(N_simple)),0].detach().cpu(),s=10)

# Initial-condition points (x, Ti) and their target values
IC_data = create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple)
IC_xt = IC_data[0].to(device)
IC_u = IC_data[1].to(device)
#plt.scatter(IC_xt[:,1].detach().cpu(),IC_xt[:,0].detach().cpu(),s=1)

# Boundary collocation points on the two spatial edges
BC_data = create_BC_data(Xi, Xf, Ti, Tf, BC_pts)
BC_bottom = BC_data[0].to(device)
BC_top = BC_data[1].to(device)

total_train_time_1 = time.perf_counter()
print("******************************************************************")
########################### Full PINN Training ############################
pinn_train_time_1 = time.perf_counter()
PINN = NN(layers_pinn, nu_pinn_train).to(device)

pinn_losses = pinn_train(PINN, nu_pinn_train, xt_resid, IC_xt, IC_u, BC_bottom, BC_top, f_hat, epochs_pinn, lr_pinn, tol, xt_test)

pinn_train_time_2 = time.perf_counter()
print(f"PINN Training Time: {(pinn_train_time_2-pinn_train_time_1)/3600} Hours")

# Snapshot the trained PINN's weights/biases on the CPU; they become the
# fixed activation function P of the TGPT-PINN.
w1 = PINN.linears[0].weight.detach().cpu()
w2 = PINN.linears[1].weight.detach().cpu()
w3 = PINN.linears[2].weight.detach().cpu()
w4 = PINN.linears[3].weight.detach().cpu()

b1 = PINN.linears[0].bias.detach().cpu()
b2 = PINN.linears[1].bias.detach().cpu()
b3 = PINN.linears[2].bias.detach().cpu()
b4 = PINN.linears[3].bias.detach().cpu()

gpt_activation = P(layers_pinn, w1, w2, w3,w4, b1, b2, b3,b4).to(device)

# TGPT-PINN hyperparameters; a single hidden neuron: depth [2, 1, 1]
lr_tgpt = 0.05
epochs_tgpt = 100000
tol_tgpt = 1e-5
layers_tgpt = [2,1,1]

#################### Training TGPT-PINN ######################
# Target transport speed, far from the nu = 0.0 the full PINN was trained on
nu = -10.0
xt_train_nu = create_residual_data(nu, Xi, Xf, Ti, Tf, N_train, N_test, N_simple)[0].to(device)
#plt.scatter(xt_train_nu[0:int(np.sqrt(N_simple)),1].detach().cpu(),xt_train_nu[0:int(np.sqrt(N_simple)),0].detach().cpu(),s=10)
TGPT_PINN = GPT(layers_tgpt, nu, gpt_activation, f_hat, IC_u, xt_train_nu, IC_xt, BC_bottom, BC_top).to(device)

tgpt_losses = gpt_train(TGPT_PINN, nu, f_hat, IC_u, xt_train_nu, xt_test, IC_xt, BC_bottom, BC_top, epochs_tgpt, lr_tgpt, tol_tgpt)

# TGPT solution, exact solution, pointwise error, and loss history
Trans_plot(xt_test, TGPT_PINN.forward(xt_test), title=fr"$\nu={round(nu,1)}$")
Trans_plot(xt_test, exact_u(xt_test,nu), title=fr"$\nu={round(nu,1)}$")
Trans_plot(xt_test,abs(TGPT_PINN.forward(xt_test)-exact_u(xt_test,nu)),title=fr"$\nu={round(nu,1)}$")
loss_plot(tgpt_losses[2], tgpt_losses[1], title=fr"$\nu={round(nu,1)}$")
/TGPT-Reaction/main_TGPT_Reaction.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | #from torch import linspace
6 | import os
7 | import time
8 |
9 | from R_data import create_residual_data, create_IC_data, create_BC_data, exact_u
10 | from R_Plotting import R_plot, loss_plot
11 |
12 | # Full PINN
13 | from R_PINN_wav import NN
14 | from R_PINN_train import pinn_train
15 |
16 | # Transport equation GPT-PINN
17 | from R_TGPT_activation import P
18 | from R_TGPT_PINN import GPT
19 | from R_TGPT_train import gpt_train
20 |
torch.set_default_dtype(torch.float)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Current Device: {device}")
if torch.cuda.is_available():
    print(f"Current Device Name: {torch.cuda.get_device_name()}")

# Training Parameter Set
rho_training = np.linspace(1,10,31)
#rho_test = np.linspace(1.15,9.85,30)
rho_pinn_train = 1.0

# Domain and Data
Xi, Xf = 0.0, 2*np.pi
Ti, Tf = 0.0, 1.0
N_train, N_test, N_simple = 101, 100, 10000
IC_pts, IC_simple = 101, 101
BC_pts = 101

# PINN training grid: the time horizon is stretched by rho_max/rho_min so
# the rho = 1 network covers the (time-rescaled) dynamics of every rho.
train_data = create_residual_data(Xi, Xf, Ti, (rho_training[-1]/rho_training[0])*Tf, N_train, N_test, N_simple)
xt_train = train_data[0].to(device)
f_train = train_data[1].to(device)

# TGPT residual/test grids on the nominal horizon [Ti, Tf]
residual_data = create_residual_data(Xi, Xf, Ti, Tf, N_train, N_test, N_simple)
xt_resid = residual_data[0].to(device)
f_hat = residual_data[1].to(device)
xt_test = residual_data[2].to(device)

IC_data = create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple)
IC_xt = IC_data[0].to(device)
IC_u = IC_data[1].to(device)

BC_data = create_BC_data(Xi, Xf, Ti, Tf, BC_pts)
BC1 = BC_data[0].to(device)
BC2 = BC_data[1].to(device)

# PINN Setting
lr_pinn = 0.001
epochs_pinn = 120000
layers_pinn = np.array([2, 20,20,20,1])
tol_pinn = 1e-6

# TGPT Setting
lr_tgpt = 0.005
epochs_tgpt = 10000
tol_tgpt = 1e-5
layers_gpt = np.array([2, 1, 1])
# object array holding the single activation-function network
P_list = np.ones(1, dtype=object)

###############################################################################
################################ Training Loop ################################
###############################################################################

########################### Full PINN Training ############################
print(f"Begin Full PINN Training: rho = {rho_pinn_train}")
pinn_train_time_1 = time.perf_counter()
PINN = NN(layers_pinn, rho_pinn_train).to(device)

pinn_losses = pinn_train(PINN, rho_pinn_train, xt_train, IC_xt, IC_u, BC1, BC2,f_train, epochs_pinn, lr_pinn, tol_pinn)

pinn_train_time_2 = time.perf_counter()
print(f"PINN Training Time: {(pinn_train_time_2-pinn_train_time_1)/3600} Hours")

# Snapshot the trained PINN's parameters on the CPU; they parameterize the
# fixed activation function P of the TGPT-PINN.
w1 = PINN.linears[0].weight.detach().cpu()
w2 = PINN.linears[1].weight.detach().cpu()
w3 = PINN.linears[2].weight.detach().cpu()
w4 = PINN.linears[3].weight.detach().cpu()

b1 = PINN.linears[0].bias.detach().cpu()
b2 = PINN.linears[1].bias.detach().cpu()
b3 = PINN.linears[2].bias.detach().cpu()
b4 = PINN.linears[3].bias.detach().cpu()

# w1/w2 of PINN.activation: presumably the adaptive-activation weights of
# the wavelet/custom activation in R_PINN_wav -- confirm there.
a1 = PINN.activation.w1.detach().cpu()
a2 = PINN.activation.w2.detach().cpu()

# Add new activation functions
P_list[0] = P(layers_pinn, w1, w2, w3, w4, b1, b2, b3, b4, a1, a2).to(device)

############################ TGPT-PINN Training ############################
# Sweep the whole rho training set; record each case's final loss to find
# the worst-approximated parameter.
rho_loss = []
gpt_train_time_1 = time.perf_counter()
for rho in rho_training:
    c_initial = torch.tensor([1.0],requires_grad=True)[:,None]

    TGPT_PINN = GPT(layers_gpt, rho, P_list[0:1], c_initial, f_hat, IC_u, xt_resid, IC_xt, BC1, BC2).to(device)

    tgpt_losses = gpt_train(TGPT_PINN, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_tgpt, lr_tgpt, tol_tgpt,testing=True)

    rho_loss.append(tgpt_losses[0].item())

    #R_plot(xt_test, TGPT_PINN.forward(xt_test), title=fr"TGPT_PINN Solution $\rho={round(rho,3)}$")
    #R_plot(xt_test, abs(TGPT_PINN.forward(xt_test)-exact_u(xt_test,rho).reshape(xt_test.shape[0],1)),title=fr"TGPT_PINN Solution error $\rho={round(rho,3)}$")
    #loss_plot(tgpt_losses[2], tgpt_losses[1], title=fr"TGPT_PINN Losses $\rho={round(rho,3)}$")
    #rMAE = max(sum(abs(TGPT_PINN.forward(xt_test)-exact_u(xt_test,rho)))/sum(abs(exact_u(xt_test,rho))))
    #rRMSE = max(sum((TGPT_PINN.forward(xt_test)-exact_u(xt_test,rho))**2)/sum((exact_u(xt_test,rho))**2))
    #print(f"TGPT-PINN at {rho} with the rMAE = {rMAE} and rRMSE = {rRMSE}")

gpt_train_time_2 = time.perf_counter()
print(f"\nGPT Training Time: {(gpt_train_time_2-gpt_train_time_1)/3600} Hours")

largest_loss = max(rho_loss)
largest_loss_list =rho_loss.index(largest_loss)

print(f"Largest Loss (Using 1 Neurons): {largest_loss} at {rho_training[int(largest_loss_list)]}")


########################## TGPT-PINN Testing ######################################
# Out-of-training-set parameter
rho = 3.25
c_initial = torch.tensor([1.0],requires_grad=True)[:,None]

TGPT_PINN = GPT(layers_gpt, rho, P_list[0:1], c_initial, f_hat, IC_u, xt_resid, IC_xt, BC1, BC2).to(device)
tgpt_losses = gpt_train(TGPT_PINN, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_tgpt, lr_tgpt, tol_tgpt,testing=True)

# Exact solution, TGPT solution, pointwise error, and loss history
R_plot(xt_test, exact_u(xt_test,rho), title=fr"$\rho={round(rho,3)}$")
R_plot(xt_test, TGPT_PINN.forward(xt_test), title=fr"$\rho={round(rho,3)}$")
R_plot(xt_test, abs(TGPT_PINN.forward(xt_test)-exact_u(xt_test,rho).reshape(xt_test.shape[0],1)),title=fr"$\rho={round(rho,3)}$")
loss_plot(tgpt_losses[2], tgpt_losses[1], title=fr"$\rho={round(rho,3)}$")
138 |
139 |
--------------------------------------------------------------------------------
/TGPT-ReactionDiffusion/main_TGPT_ReactionDiffusion.py:
--------------------------------------------------------------------------------
1 | # Import and GPU Support
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import torch
5 | #from torch import linspace
6 | import os
7 | import time
8 |
9 | from RD_data import create_residual_data, create_IC_data, create_BC_data
10 | from RD_Plotting import RD_plot,loss_plot
11 |
12 | # Full PINN
13 | from RD_PINN_wav import NN
14 | from RD_PINN_train import pinn_train
15 |
16 | # Reaction-Diffusion equation TGPT-PINN
17 | from RD_TGPT_activation import P
18 | from RD_TGPT_PINN import GPT
19 | from RD_TGPT_train import gpt_train
20 |
torch.set_default_dtype(torch.float)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Current Device: {device}")
if torch.cuda.is_available():
    print(f"Current Device Name: {torch.cuda.get_device_name()}")

# Domain and Data
Xi, Xf = 0.0, 2*np.pi
Ti, Tf = 0.0, 1.0
N_train, N_test, N_simple = 101, 88, 10000
IC_pts, IC_simple = 101,101
BC_pts = 101

# Collocation/test sets (randomly subsampled inside RD_data)
residual_data = create_residual_data(Xi, Xf, Ti, Tf, N_train, N_test, N_simple)
xt_resid = residual_data[0].to(device)
f_hat = residual_data[1].to(device)
xt_test = residual_data[2].to(device)

IC_data = create_IC_data(Xi, Xf, Ti, Tf, IC_pts, IC_simple)
IC_xt = IC_data[0].to(device)
IC_u = IC_data[1].to(device)

BC_data = create_BC_data(Xi, Xf, Ti, Tf, BC_pts)
BC1 = BC_data[0].to(device)
BC2 = BC_data[1].to(device)

# Training Parameter Set
nu_training = np.linspace(5, 1, 11)
rho_training = np.linspace(5, 1, 11)

# Cartesian product of (nu, rho): 121 training cases
db_training = []
for i in range(nu_training.shape[0]):
    for j in range(rho_training.shape[0]):
        db_training.append([nu_training[i],rho_training[j]])
db_training = np.array(db_training)

###############################################################################
#################################### Setup ####################################
###############################################################################
number_of_neurons = 10
# db_neurons[k] is the (nu, rho) pair whose full PINN becomes neuron k;
# entries past index 0 are placeholders overwritten by the greedy loop.
db_neurons = [1 for i in range(number_of_neurons)]
db_neurons[0] = [1.0, 5.0]
loss_list = np.ones(number_of_neurons)
P_list = np.ones(number_of_neurons, dtype=object)
print(f"Expected Final GPT-PINN Depth: {[2,number_of_neurons,1]}\n")

# PINN Setting
lr_pinn = 0.001
epochs_pinn = 120000
layers_pinn = np.array([2, 40,40,40,1])
tol_pinn = 1e-6

# TGPT-PINN Setting
lr_tgpt = 0.005
epochs_tgpt = 10000
tol_tgpt = 1e-5

total_train_time_1 = time.perf_counter()
###############################################################################
################################ Training Loop ################################
###############################################################################
# Greedy loop: at each step, train a full PINN at the current worst-case
# (nu, rho), add it as a new activation function, then sweep all training
# cases with the enlarged TGPT-PINN to find the next worst case.
for i in range(0, number_of_neurons):
    print("******************************************************************")
    ########################### Full PINN Training ############################
    db_pinn_train = db_neurons[i]
    nu_pinn_train, rho_pinn_train = db_pinn_train[0], db_pinn_train[1]
    #spilt_exact_u=torch.from_numpy(exact_u(Xi,Xf,Ti,Tf,N_test,nu_pinn_train,rho_pinn_train).reshape(xt_test.shape[0],1)).to(device)
    print(f"Begin Full PINN Training: nu ={nu_pinn_train}, rho = {rho_pinn_train}")

    pinn_train_time_1 = time.perf_counter()
    PINN = NN(layers_pinn, nu_pinn_train, rho_pinn_train).to(device)

    pinn_losses = pinn_train(PINN, xt_resid, IC_xt, IC_u, BC1, BC2,f_hat, epochs_pinn, lr_pinn, tol_pinn)

    pinn_train_time_2 = time.perf_counter()
    print(f"PINN Training Time: {(pinn_train_time_2-pinn_train_time_1)/3600} Hours")

    #rMAE = max(sum(abs(PINN(xt_test)-spilt_exact_u))/sum(abs(spilt_exact_u)))
    #rRMSE = torch.sqrt(sum((PINN(xt_test)-spilt_exact_u)**2)/sum((spilt_exact_u)**2)).item()
    #print(f"TGPT-PINN at {db_pinn_train} with the rMAE = {rMAE} and rRMSE = {rRMSE}")

    # Snapshot the trained network's parameters (CPU copies) so they can
    # be baked into activation function P_list[i].
    w1 = PINN.linears[0].weight.detach().cpu()
    w2 = PINN.linears[1].weight.detach().cpu()
    w3 = PINN.linears[2].weight.detach().cpu()
    w4 = PINN.linears[3].weight.detach().cpu()

    b1 = PINN.linears[0].bias.detach().cpu()
    b2 = PINN.linears[1].bias.detach().cpu()
    b3 = PINN.linears[2].bias.detach().cpu()
    b4 = PINN.linears[3].bias.detach().cpu()

    # presumably the adaptive-activation weights -- confirm in RD_PINN_wav
    a1 = PINN.activation.w1.detach().cpu()
    a2 = PINN.activation.w2.detach().cpu()

    # Add new activation functions
    P_list[i] = P(layers_pinn, w1, w2, w3, w4, b1, b2, b3, b4, a1, a2).to(device)

    print(f"\nCurrent TGPT-PINN Depth: [2,{i+1},1]")

    ############################ GPT-PINN Training ############################
    # i+1 hidden neurons; combination coefficients start uniform
    layers_tgpt = np.array([2, i+1, 1])
    c_initial = torch.full((1,i+1), 1/(i+1))

    largest_case = 0
    largest_loss = 0

    tgpt_train_time_1 = time.perf_counter()
    # Sweep all 121 cases; gpt_train carries the running worst loss/case
    for db in db_training:
        nu = db[0]
        rho = db[1]

        TGPT_PINN = GPT(layers_tgpt, nu, rho, P_list[0:i+1], c_initial, f_hat, IC_u, xt_resid, IC_xt, BC1, BC2).to(device)

        tgpt_losses = gpt_train(TGPT_PINN,nu, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_tgpt, lr_tgpt, largest_loss, largest_case)

        largest_loss = tgpt_losses[0]
        largest_case = tgpt_losses[1]

    tgpt_train_time_2 = time.perf_counter()
    print("TGPT-PINN Training Completed")
    print(f"\nTGPT Training Time: {(tgpt_train_time_2-tgpt_train_time_1)/3600} Hours")

    loss_list[i] = largest_loss

    # The worst case becomes the next neuron's PINN training parameters
    if (i+1 < number_of_neurons):
        db_neurons[i+1] = largest_case

    print(f"\nLargest Loss (Using {i+1} Neurons): {largest_loss}")
    print(f"Parameter Case: {largest_case}")

total_train_time_2 = time.perf_counter()

###############################################################################
# Results of largest loss, parameters chosen, and times may vary based on
# the initialization of full PINN and the final loss of the full PINN
print("******************************************************************")
print("*** Full PINN and TGPT-PINN Training Complete ***")
print(f"Total Training Time: {(total_train_time_2-total_train_time_1)/3600} Hours\n")
print(f"Final TGPT-PINN Depth: {[2,len(P_list),1]}")
print(f"\nActivation Function Parameters: \n{db_neurons}\n")
161 |
########################## TGPT-PINN Testing ##########################
# Test the fully-grown TGPT-PINN at an out-of-training-set parameter pair.
nu = 1.5
rho = 1.5

# Slice all trained neurons explicitly instead of relying on the leftover
# loop index i (equal values: i == number_of_neurons - 1 after the loop).
TGPT_PINN = GPT(layers_tgpt, nu, rho, P_list[0:number_of_neurons], c_initial, f_hat, IC_u, xt_resid, IC_xt, BC1, BC2).to(device)
test_losses = gpt_train(TGPT_PINN,nu, rho, f_hat, IC_u, xt_resid, xt_test, IC_xt, BC1, BC2, epochs_tgpt, lr_tgpt,testing=True)

#spilt_exact_u=torch.from_numpy(exact_u(Xi,Xf,Ti,Tf,N_test,nu,rho).reshape(xt_test.shape[0],1)).to(device)
# Label the plots with the tested (nu, rho); the old code interpolated the
# stale loop variable `db` (the last TRAINING case) into these titles.
RD_plot(xt_test, TGPT_PINN.forward(xt_test), title=fr"TGPT_PINN Solution at $({nu}, {rho})$")
loss_plot(test_losses[2], test_losses[1], title=fr"TGPT_PINN Losses $({nu}, {rho})$")
#rMAE = max(sum(abs(TGPT_PINN.forward(xt_test)-spilt_exact_u))/sum(abs(spilt_exact_u)))
#rRMSE = torch.sqrt(sum((TGPT_PINN.forward(xt_test)-spilt_exact_u)**2)/sum((spilt_exact_u)**2))
#print(f"TGPT-PINN at ({nu}, {rho}) with the rMAE = {rMAE} and rRMSE = {rRMSE.item()}")
175 |
--------------------------------------------------------------------------------