├── FDFD └── inverse_design_FDFD-epsstart-eps1.ipynb ├── FEM ├── Main.ipynb └── Module │ ├── FilterAndThreshold.jl │ ├── Helper.jl │ ├── MeshGenerator.jl │ ├── Model.jl │ ├── Objective.jl │ ├── PINN.jl │ └── PML.jl ├── LICENSE ├── README.md ├── holography ├── holography │ ├── __init__.py │ ├── bc.py │ ├── config.py │ ├── pde.py │ └── target.py └── holography_main.py ├── requirements.txt └── stokes └── stokes.py /FDFD/inverse_design_FDFD-epsstart-eps1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "using StatsBase: mean\n", 10 | "using NLopt\n", 11 | "using ChainRules\n", 12 | "using PyPlot\n", 13 | "using LinearAlgebra\n", 14 | "using SparseArrays\n", 15 | "using Zygote\n", 16 | "using DelimitedFiles\n", 17 | "using BenchmarkTools" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "function Maxwell_2d(Lx, Ly, ϵ, ω, dpml, resolution;\n", 27 | " Rpml=1e-20)\n", 28 | "\n", 29 | " nx = n_func(Lx, dpml, resolution) #nb point in x\n", 30 | " ny = n_func(Ly, dpml, resolution) #nb points in y\n", 31 | " npml = round(Int, dpml*resolution)\n", 32 | " δ = 1/resolution\n", 33 | " σ0 = -log(Rpml) / (4dpml^3/3)\n", 34 | "\n", 35 | " # coordinates centered in (0,0)\n", 36 | " x = (1-npml:nx-npml) * δ\n", 37 | " y = (1-npml:ny-npml) * δ\n", 38 | " \n", 39 | " #define the laplacian operator in x direction\n", 40 | " o = ones(nx)/δ\n", 41 | " \n", 42 | " σ = Float64[ξ>Lx ? σ0 * (ξ-Lx)^2 : ξ<0 ? σ0 * ξ^2 : 0.0 for ξ in x]\n", 43 | " Σ = spdiagm(0 => 1.0 ./(1 .+ (im/ω)*σ))\n", 44 | " \n", 45 | " Imat, J, V = SparseArrays.spdiagm_internal(-1 => -o[1:end-1], 0 => o);\n", 46 | " D = sparse(Imat, J, V, nx, nx)\n", 47 | " D[1,end] = -1/δ #periodic boundary condition in x direction\n", 48 | " ∇2x = Σ * transpose(D) * Σ * D\n", 49 | "\n", 50 | " #define the laplacian operator in y direction\n", 51 | " o = ones(ny) / δ\n", 52 | " y′=((-npml:ny-npml) .+ 0.5) * δ\n", 53 | "\n", 54 | " σ = Float64[ξ>Ly ? σ0 * (ξ-Ly)^2 : ξ<0 ? σ0 * ξ^2 : 0.0 for ξ in y]\n", 55 | " Σ = spdiagm(0 => 1.0 ./(1 .+ (im/ω)*σ))\n", 56 | " σ′ = Float64[ξ>Ly ? σ0 * (ξ-Ly)^2 : ξ<0 ? 
σ0 * ξ^2 : 0.0 for ξ in y′]\n", 57 | " Σ′ = spdiagm(0 => 1.0 ./(1 .+ (im/ω)*σ′))\n", 58 | " \n", 59 | " Imat, J, V = SparseArrays.spdiagm_internal(-1 => -o, 0 => o);\n", 60 | " D = sparse(Imat, J, V, ny+1, ny)\n", 61 | " ∇2y = Σ * transpose(D) * Σ′ * D\n", 62 | "\n", 63 | " #get 2d laplacian using kronecker product\n", 64 | " Ix = sparse(1.0I, nx, nx)\n", 65 | " Iy = sparse(1.0I, ny, ny)\n", 66 | " ∇2d = (kron(Ix, ∇2y) + kron(∇2x, Iy))\n", 67 | "\n", 68 | " if isa(ϵ, Function)\n", 69 | " geometry = ComplexF64[ϵ(ξ, ζ) for ζ in y, ξ in x]\n", 70 | " else\n", 71 | " geometry = ϵ \n", 72 | " end\n", 73 | "\n", 74 | " return (∇2d - spdiagm(0 => reshape(ω^2 * geometry, length(x)*length(y))),\n", 75 | " nx, ny, x, y)\n", 76 | "end\n" 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "metadata": {}, 82 | "source": [ 83 | "# Problem definition" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "changevarx(x) = x-2\n", 93 | "changevary(y) = y-2\n", 94 | "\n", 95 | "const dpml = 1\n", 96 | "const Lx = 4\n", 97 | "const Ly = 5\n", 98 | "const ω = 2pi\n", 99 | "const resolution = 40\n", 100 | "\n", 101 | "const dsource = 1.5" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "n_func(Lx, dpml, resolution) = round(Int, (Lx + 2*dpml) * resolution) #nb point in x\n", 111 | "\n", 112 | "function x_func(Ly, dpml, resolution)\n", 113 | " ny = n_func(Ly, dpml, resolution) #nb points in y\n", 114 | " npml = round(Int, dpml*resolution)\n", 115 | " δ = 1/resolution\n", 116 | " return (1-npml:ny-npml) * δ\n", 117 | "end" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "function create_geom_nopml(ϵ, ny, nx; defaulteps = 0)\n", 127 | " geom = ones((ny, nx))\n", 128 | " if defaulteps!=0\n", 129 | " geom[design_domain, :] .= defaulteps\n", 130 | " end\n", 131 | " geom[design_domain, non_pml_x] = ϵ\n", 132 | " return geom\n", 133 | "end\n", 134 | "\n", 135 | "function create_geom(ϵ, ny, nx)\n", 136 | " geom = ones((ny, nx))\n", 137 | " geom[design_domain, :] = ϵ\n", 138 | " return geom\n", 139 | "end\n", 140 | "\n", 141 | "function create_target(Lx, Ly, dpml, resolution, target_domain)\n", 142 | " \n", 143 | " nx = n_func(Lx, dpml, resolution)\n", 144 | " ny = n_func(Ly, dpml, resolution)\n", 145 | " target = zeros((ny, nx))\n", 146 | " \n", 147 | " x = x_func(Lx, dpml, resolution)\n", 148 | " y = x_func(Ly, dpml, resolution)\n", 149 | " mask_x = -0.5 .<= changevarx.(x) .<= 0.5\n", 150 | " mask_y = 1 .<= changevary.(y) .<= 2\n", 151 | " \n", 152 | " target[mask_y, mask_x] .= 1\n", 153 | " \n", 154 | " return target[target_domain, non_pml_x]\n", 155 | "end" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "y = x_func(Ly, dpml, resolution)\n", 165 | "x = x_func(Lx, dpml, resolution)\n", 166 | "\n", 167 | "const design_domain = -1 .<= changevary.(y) .<= 0;\n", 168 | "const target_domain = 0 .<= changevary.(y) .<= 3;\n", 169 | "const non_pml_x = -2 .<= changevary.(x) .<= 2;\n", 170 | "\n", 171 | "const target = create_target(Lx, Ly, dpml, resolution, target_domain); # square [-0.5, 1], [0.5, 2]" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "metadata": {}, 178 | "outputs": [], 179 | 
"source": [ 180 | "imshow(target)\n", 181 | "colorbar()" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": null, 187 | "metadata": {}, 188 | "outputs": [], 189 | "source": [ 190 | "const ub = 12\n", 191 | "const lb = 1;\n", 192 | "\n", 193 | "# const defaulteps = (lb+ub)/2" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "function solve_field(ϵ)\n", 203 | " \n", 204 | " nx = n_func(Lx, dpml, resolution) #nb point in x\n", 205 | " ny = n_func(Ly, dpml, resolution) #nb points in y\n", 206 | "# geom = create_geom_nopml(ϵ, ny, nx, defaulteps=defaulteps)\n", 207 | " geom = create_geom(ϵ, ny, nx)\n", 208 | " \n", 209 | " A, nx, ny, x, y=Maxwell_2d(Lx, Ly, geom, ω, dpml, resolution);\n", 210 | "\n", 211 | " J = zeros(ComplexF64, (ny, nx))\n", 212 | " J[Integer(dsource * resolution), :] = 1im * ω * ones(nx) * resolution #plane source\n", 213 | " Ez = reshape(A \\ J[:], (ny, nx));\n", 214 | " return A, Ez, ny, nx, x, y\n", 215 | "end" 216 | ] 217 | }, 218 | { 219 | "cell_type": "markdown", 220 | "metadata": {}, 221 | "source": [ 222 | "# Optimization problem setup" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "metadata": {}, 228 | "source": [ 229 | "## define gradient using adjoint method" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "\"\"\"\n", 239 | "`MSE = MSE_func(ϵ)`\n", 240 | "this function returns the MSE between the intensity (defined as the abs2 of the electric field) and the target field\n", 241 | "\n", 242 | "ϵ is the permitivity of the design domain\n", 243 | "\"\"\"\n", 244 | "function MSE_func(ϵ)\n", 245 | " A, Ez, ny, nx, x, y = solve_field(ϵ)\n", 246 | " return mean((abs2.(Ez[target_domain, non_pml_x]) .- target).^2)\n", 247 | "end\n", 248 | "\n", 249 | "function ChainRules.rrule(::typeof(MSE_func), ϵ)\n", 250 | " A, Ez, ny, nx, x, y = solve_field(ϵ)\n", 251 | " \n", 252 | " target_points = zeros(ComplexF64, (ny, nx))\n", 253 | " target_points[target_domain,non_pml_x] = 2/length(target) .*(abs2.(Ez[target_domain, non_pml_x]) .- target).*conj.(Ez[target_domain, non_pml_x])\n", 254 | "# gradient = 2 * real(ω^2 .*reshape(conj.(A' \\ conj.(target_points[:])) .* Ez[:], (ny, nx))[design_domain, non_pml_x]);\n", 255 | " gradient = 2 * real(ω^2 .*reshape(conj.(A' \\ conj.(target_points[:])) .* Ez[:], (ny, nx))[design_domain, :]);\n", 256 | "\n", 257 | " \n", 258 | " function pullback(Δ)\n", 259 | " return (NO_FIELDS, gradient .* Δ)\n", 260 | " end\n", 261 | " \n", 262 | " return mean((abs2.(Ez[target_domain, non_pml_x]) .- target).^2), pullback \n", 263 | "end\n", 264 | "\n", 265 | "Zygote.refresh()" 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": null, 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "cur_design = [lb for y_ in y[design_domain], x_ in x]; #initial guess with pml" 275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": null, 280 | "metadata": {}, 281 | "outputs": [], 282 | "source": [ 283 | "MSE_func(cur_design)" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "MSE_func'(cur_design)[:]" 293 | ] 294 | }, 295 | { 296 | "cell_type": "markdown", 297 | "metadata": {}, 298 | "source": [ 299 | "## inverse design using nlopt" 300 | ] 301 | }, 302 | { 303 | "cell_type": 
"code", 304 | "execution_count": null, 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "it = 0\n", 309 | "f_evals = []\n", 310 | "\n", 311 | "function myfunc(x::Vector, grad::Vector)\n", 312 | " \n", 313 | " global it, f_evals\n", 314 | " cur_design = reshape(x, (:, n_func(Lx, dpml, resolution)))\n", 315 | " \n", 316 | " if length(grad) > 0\n", 317 | " grad[:] = MSE_func'(cur_design)[:]\n", 318 | " end\n", 319 | " \n", 320 | " f = MSE_func(cur_design)\n", 321 | " println(\"f_$it = $f\")\n", 322 | " it+=1\n", 323 | " push!(f_evals, f)\n", 324 | " return f\n", 325 | "end" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": null, 331 | "metadata": { 332 | "scrolled": true 333 | }, 334 | "outputs": [], 335 | "source": [ 336 | "cur_design = [lb for y_ in y[design_domain], x_ in x]; #initial guess with pml\n", 337 | "\n", 338 | "opt = Opt(:LD_MMA, length(cur_design))\n", 339 | "\n", 340 | "opt.lower_bounds = lb .* ones(length(cur_design))\n", 341 | "opt.upper_bounds = ub .* ones(length(cur_design))\n", 342 | "opt.xtol_rel = 1e-8\n", 343 | "\n", 344 | "opt.min_objective = myfunc\n", 345 | "\n", 346 | "@time (minf,minx,ret) = optimize(opt, cur_design[:])\n", 347 | "numevals = opt.numevals # the number of function evaluations\n", 348 | "println(\"got $minf after $numevals iterations (returned $ret)\")" 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": null, 354 | "metadata": {}, 355 | "outputs": [], 356 | "source": [ 357 | "A, Ez, ny, nx, x, y = solve_field(minx);" 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "metadata": {}, 364 | "outputs": [], 365 | "source": [ 366 | "loglog(f_evals)\n", 367 | "ylabel(\"MSE\")\n", 368 | "xlabel(\"# optimization step\")" 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": null, 374 | "metadata": {}, 375 | "outputs": [], 376 | "source": [ 377 | "optimal_design = reshape(minx, (:, n_func(Lx, dpml, resolution)))\n", 378 | "\n", 379 | "writedlm(\"optimal_design_eps1_.csv\", optimal_design, ',')" 380 | ] 381 | }, 382 | { 383 | "cell_type": "markdown", 384 | "metadata": {}, 385 | "source": [ 386 | "# Generating figures" 387 | ] 388 | }, 389 | { 390 | "cell_type": "code", 391 | "execution_count": null, 392 | "metadata": {}, 393 | "outputs": [], 394 | "source": [ 395 | "optimal_design = readdlm(\"optimal_design_eps1.csv\", ',')\n", 396 | "\n", 397 | "A, Ez, ny, nx, x, y = solve_field(optimal_design);" 398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": null, 403 | "metadata": {}, 404 | "outputs": [], 405 | "source": [ 406 | "func = abs2\n", 407 | "cmapname = \"viridis\"\n", 408 | "\n", 409 | "subplot(1,2,1)\n", 410 | "title(\"$func(Ez)\")\n", 411 | "contourf(changevarx.(x),changevary.(y),func.(Ez), cmap=cmapname, levels=500)\n", 412 | "xlabel(\"x\")\n", 413 | "ylabel(\"y\")\n", 414 | "colorbar()\n", 415 | "\n", 416 | "subplot(1,2,2)\n", 417 | "title(\"geometry\")\n", 418 | "epsilonvals = create_geom(optimal_design, ny, nx)\n", 419 | "sourcecolor = maximum(epsilonvals)\n", 420 | "contourf(changevarx.(x),changevary.(y), epsilonvals, \n", 421 | " cmap=\"viridis\", levels=100)\n", 422 | "xlabel(\"x\")\n", 423 | "ylabel(\"y\")\n", 424 | "colorbar()\n", 425 | "\n", 426 | "tight_layout()" 427 | ] 428 | }, 429 | { 430 | "cell_type": "code", 431 | "execution_count": null, 432 | "metadata": {}, 433 | "outputs": [], 434 | "source": [ 435 | "@btime MSE_func'(optimal_design)" 436 | ] 437 | }, 438 | { 439 | "cell_type": 
"code", 440 | "execution_count": null, 441 | "metadata": {}, 442 | "outputs": [], 443 | "source": [ 444 | "@btime MSE_func(optimal_design)" 445 | ] 446 | }, 447 | { 448 | "cell_type": "code", 449 | "execution_count": null, 450 | "metadata": {}, 451 | "outputs": [], 452 | "source": [ 453 | "writed(\"intensity_FDFD.txt\", abs2.(Ez), ' ')" 454 | ] 455 | }, 456 | { 457 | "cell_type": "code", 458 | "execution_count": null, 459 | "metadata": {}, 460 | "outputs": [], 461 | "source": [ 462 | "writedlm(\"epsilonvals_FDFD.txt\", epsilonvals, ' ')" 463 | ] 464 | }, 465 | { 466 | "cell_type": "code", 467 | "execution_count": null, 468 | "metadata": {}, 469 | "outputs": [], 470 | "source": [ 471 | "writedlm(\"x_axis_FDFD.txt\", changevarx.(x), ' ')" 472 | ] 473 | }, 474 | { 475 | "cell_type": "code", 476 | "execution_count": null, 477 | "metadata": {}, 478 | "outputs": [], 479 | "source": [ 480 | "writedlm(\"y_axis_FDFD.txt\", changevary.(y), ' ')" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [] 489 | } 490 | ], 491 | "metadata": { 492 | "kernelspec": { 493 | "display_name": "Julia 1.5.0", 494 | "language": "julia", 495 | "name": "julia-1.5" 496 | }, 497 | "language_info": { 498 | "file_extension": ".jl", 499 | "mimetype": "application/julia", 500 | "name": "julia", 501 | "version": "1.5.0" 502 | } 503 | }, 504 | "nbformat": 4, 505 | "nbformat_minor": 4 506 | } 507 | -------------------------------------------------------------------------------- /FEM/Module/FilterAndThreshold.jl: -------------------------------------------------------------------------------- 1 | ################### Filter and Threshold ##################### 2 | # pf = Filter(p) 3 | a_f(r,u,v) = r^2*(∇(v)⊙∇(u))+v⊙u 4 | function Filter(pvec,r,flag_f::Bool,P,Pf,Qf,dΩ,dΓ) 5 | if (flag_f) 6 | ph = FEFunction(P,pvec) 7 | op = AffineFEOperator(Pf,Qf) do u, v 8 | ∫( a_f(r,u,v))dΩ, ∫( v*ph )dΩ,∫( 0*v )dΓ 9 | end 10 | pf = solve(op) 11 | return get_free_values(pf) 12 | else 13 | return pvec 14 | end 15 | end 16 | 17 | # Threshold function 18 | Threshold(β,η,flag_t::Bool,pf) = flag_t==false ? 
pf : ((tanh(β*η)+tanh(β*(pf-η)))/(tanh(β*η)+tanh(β*(1.0-η)))) -------------------------------------------------------------------------------- /FEM/Module/Helper.jl: -------------------------------------------------------------------------------- 1 | # Convert piece-wise constant p (design region) to pvec (whole domain) 2 | function p_vec(p,P,tags,design_tag) 3 | pvec = zeros(num_free_dofs(P)) 4 | pi = 0 5 | @assert length(tags)==num_free_dofs(P) 6 | for i=1:length(tags) 7 | if tags[i] == design_tag 8 | pi += 1 9 | pvec[i] = p[pi] 10 | end 11 | end 12 | pvec 13 | end 14 | 15 | # Extract the design region part from a whole vector 16 | function extract_design(pvec,np,tags,design_tag) 17 | p_d = zeros(eltype(pvec),np) 18 | pi = 0 19 | @assert length(pvec)==length(tags) 20 | for i=1:length(tags) 21 | if tags[i] == design_tag 22 | pi += 1 23 | p_d[pi] = pvec[i] 24 | end 25 | end 26 | @assert np==pi 27 | p_d 28 | end 29 | 30 | # Gaussian Distribution function with center x0 31 | function GaussianD(x,x0::AbstractArray,δ::AbstractArray) 32 | n=length(x) 33 | @assert (n==length(x0))&&(n==length(δ)) 34 | δn = 1.0 35 | x_δ = 0.0 36 | for i=1:n 37 | δn *= √(2π)*δ[i] 38 | x_δ += ((x[i]-x0[i])/δ[i])^2 39 | end 40 | 1.0/δn*exp(-x_δ/2.0) 41 | end 42 | 43 | # Gaussian Distribution function with center x0 44 | function GaussianY(x,x0::AbstractArray,δ::AbstractArray) 45 | n=length(x) 46 | @assert (n==length(x0))&&(n==length(δ)) 47 | δn = √(2π)*δ[2] 48 | x_δ = ((x[2]-x0[2])/δ[2])^2 49 | 1.0/δn*exp(-x_δ/2.0) 50 | end -------------------------------------------------------------------------------- /FEM/Module/MeshGenerator.jl: -------------------------------------------------------------------------------- 1 | ######################## Mesh Generation function with GMSH #################### 2 | function MeshGenerator(L,h1,h2,h3,hd,dpml,l0,ld,lpml) 3 | gmsh.initialize() 4 | gmsh.option.setNumber("General.Terminal", 1) 5 | gmsh.option.setNumber("Mesh.Algorithm", 6) 6 | gmsh.clear() 7 | gmsh.model.add("geometry") # name it whatever you want 8 | 9 | # Add points 10 | gmsh.model.geo.addPoint(-L/2-dpml, -hd-h2-h3-dpml, 0, lpml, 1) 11 | gmsh.model.geo.addPoint(-L/2 , -hd-h2-h3-dpml, 0, lpml, 2) 12 | gmsh.model.geo.addPoint(-L/2-dpml, -hd-h2-h3 , 0, lpml, 3) 13 | gmsh.model.geo.addPoint(-L/2 , -hd-h2-h3 , 0, l0, 4) 14 | gmsh.model.geo.addPoint( L/2 , -hd-h2-h3-dpml, 0, lpml, 5) 15 | gmsh.model.geo.addPoint( L/2 , -hd-h2-h3 , 0, l0, 6) 16 | gmsh.model.geo.addPoint( L/2+dpml, -hd-h2-h3-dpml, 0, lpml, 7) 17 | gmsh.model.geo.addPoint( L/2+dpml, -hd-h2-h3 , 0, lpml, 8) 18 | gmsh.model.geo.addPoint(-L/2-dpml, -hd-h2 , 0, l0, 9) 19 | gmsh.model.geo.addPoint(-L/2 , -hd-h2 , 0, l0, 10) 20 | gmsh.model.geo.addPoint( L/2 , -hd-h2 , 0, l0, 11) 21 | gmsh.model.geo.addPoint( L/2+dpml, -hd-h2 , 0, l0, 12) 22 | gmsh.model.geo.addPoint(-L/2-dpml, -hd , 0, ld, 13) 23 | gmsh.model.geo.addPoint(-L/2 , -hd , 0, ld, 14) 24 | gmsh.model.geo.addPoint( L/2 , -hd , 0, ld, 15) 25 | gmsh.model.geo.addPoint( L/2+dpml, -hd , 0, ld, 16) 26 | gmsh.model.geo.addPoint(-L/2-dpml, 0. , 0, ld, 17) 27 | gmsh.model.geo.addPoint(-L/2 , 0. , 0, ld, 18) 28 | gmsh.model.geo.addPoint( L/2 , 0. , 0, ld, 19) 29 | gmsh.model.geo.addPoint( L/2+dpml, 0. 
, 0, ld, 20) 30 | gmsh.model.geo.addPoint(-L/2-dpml, h1 , 0, lpml, 21) 31 | gmsh.model.geo.addPoint(-L/2 , h1 , 0, l0, 22) 32 | gmsh.model.geo.addPoint( L/2 , h1 , 0, l0, 23) 33 | gmsh.model.geo.addPoint( L/2+dpml, h1 , 0, lpml, 24) 34 | gmsh.model.geo.addPoint(-L/2-dpml, h1+dpml , 0, lpml, 25) 35 | gmsh.model.geo.addPoint(-L/2 , h1+dpml , 0, lpml, 26) 36 | gmsh.model.geo.addPoint( L/2 , h1+dpml , 0, lpml, 27) 37 | gmsh.model.geo.addPoint( L/2+dpml, h1+dpml , 0, lpml, 28) 38 | #gmsh.model.geo.addPoint(-Lt/2, h1/2-ht/2, 0, l0, 29) 39 | #gmsh.model.geo.addPoint( Lt/2, h1/2-ht/2, 0, l0, 30) 40 | #gmsh.model.geo.addPoint( Lt/2, h1/2+ht/2, 0, l0, 31) 41 | #gmsh.model.geo.addPoint(-Lt/2, h1/2+ht/2, 0, l0, 32) 42 | #gmsh.model.geo.addPoint(x_s,y_s, 0, l_s, 17) 43 | # Add lines 44 | gmsh.model.geo.addLine( 1, 2, 1) 45 | gmsh.model.geo.addLine( 2, 4, 2) 46 | gmsh.model.geo.addLine( 4, 3, 3) 47 | gmsh.model.geo.addLine( 3, 1, 4) 48 | gmsh.model.geo.addLine( 2, 5, 5) 49 | gmsh.model.geo.addLine( 5, 6, 6) 50 | gmsh.model.geo.addLine( 6, 4, 7) 51 | gmsh.model.geo.addLine( 5, 7, 8) 52 | gmsh.model.geo.addLine( 7, 8, 9) 53 | gmsh.model.geo.addLine( 8, 6, 10) 54 | gmsh.model.geo.addLine( 4, 10, 11) 55 | gmsh.model.geo.addLine( 10, 9, 12) 56 | gmsh.model.geo.addLine( 9, 3, 13) 57 | gmsh.model.geo.addLine( 6, 11, 14) 58 | gmsh.model.geo.addLine( 11, 10, 15) 59 | gmsh.model.geo.addLine( 8, 12, 16) 60 | gmsh.model.geo.addLine( 12, 11, 17) 61 | gmsh.model.geo.addLine( 10, 14, 18) 62 | gmsh.model.geo.addLine( 14, 13, 19) 63 | gmsh.model.geo.addLine( 13, 9, 20) 64 | gmsh.model.geo.addLine( 11, 15, 21) 65 | gmsh.model.geo.addLine( 15, 14, 22) 66 | gmsh.model.geo.addLine( 12, 16, 23) 67 | gmsh.model.geo.addLine( 16, 15, 24) 68 | gmsh.model.geo.addLine( 14, 18, 25) 69 | gmsh.model.geo.addLine( 18, 17, 26) 70 | gmsh.model.geo.addLine( 17, 13, 27) 71 | gmsh.model.geo.addLine( 15, 19, 28) 72 | gmsh.model.geo.addLine( 19, 18, 29) 73 | gmsh.model.geo.addLine( 16, 20, 30) 74 | gmsh.model.geo.addLine( 20, 19, 31) 75 | gmsh.model.geo.addLine( 18, 22, 32) 76 | gmsh.model.geo.addLine( 22, 21, 33) 77 | gmsh.model.geo.addLine( 21, 17, 34) 78 | gmsh.model.geo.addLine( 19, 23, 35) 79 | gmsh.model.geo.addLine( 23, 22, 36) 80 | gmsh.model.geo.addLine( 20, 24, 37) 81 | gmsh.model.geo.addLine( 24, 23, 38) 82 | gmsh.model.geo.addLine( 22, 26, 39) 83 | gmsh.model.geo.addLine( 26, 25, 40) 84 | gmsh.model.geo.addLine( 25, 21, 41) 85 | gmsh.model.geo.addLine( 23, 27, 42) 86 | gmsh.model.geo.addLine( 27, 26, 43) 87 | gmsh.model.geo.addLine( 24, 28, 44) 88 | gmsh.model.geo.addLine( 28, 27, 45) 89 | #gmsh.model.geo.addLine( 29, 30, 46) 90 | #gmsh.model.geo.addLine( 30, 31, 47) 91 | #gmsh.model.geo.addLine( 31, 32, 48) 92 | #gmsh.model.geo.addLine( 32, 29, 49) 93 | # Construct curve loops and surfaces 94 | gmsh.model.geo.addCurveLoop([ 1, 2, 3, 4], 1) 95 | gmsh.model.geo.addCurveLoop([ 5, 6, 7, -2], 2) 96 | gmsh.model.geo.addCurveLoop([ 8, 9, 10, -6], 3) 97 | gmsh.model.geo.addCurveLoop([ 11, 12, 13, -3], 4) 98 | gmsh.model.geo.addCurveLoop([ -7, 14, 15,-11], 5) 99 | gmsh.model.geo.addCurveLoop([ 16, 17,-14,-10], 6) 100 | gmsh.model.geo.addCurveLoop([ 18, 19, 20,-12], 7) 101 | gmsh.model.geo.addCurveLoop([-15, 21, 22,-18], 8) 102 | gmsh.model.geo.addCurveLoop([-17, 23, 24,-21], 9) 103 | gmsh.model.geo.addCurveLoop([-19, 25, 26, 27],10) 104 | gmsh.model.geo.addCurveLoop([-22, 28, 29,-25],11) 105 | gmsh.model.geo.addCurveLoop([-24, 30, 31,-28],12) 106 | gmsh.model.geo.addCurveLoop([-26, 32, 33, 34],13) 107 | gmsh.model.geo.addCurveLoop([-29, 
35, 36,-32],14) 108 | gmsh.model.geo.addCurveLoop([-31, 37, 38,-35],15) 109 | gmsh.model.geo.addCurveLoop([-33, 39, 40, 41],16) 110 | gmsh.model.geo.addCurveLoop([-36, 42, 43,-39],17) 111 | gmsh.model.geo.addCurveLoop([-38, 44, 45,-42],18) 112 | #gmsh.model.geo.addCurveLoop([ 46, 47, 48, 49],19) 113 | #gmsh.model.geo.addCurveLoop([29, 30], 11) 114 | gmsh.model.geo.addPlaneSurface([ 1], 1) 115 | gmsh.model.geo.addPlaneSurface([ 2], 2) 116 | gmsh.model.geo.addPlaneSurface([ 3], 3) 117 | gmsh.model.geo.addPlaneSurface([ 4], 4) 118 | gmsh.model.geo.addPlaneSurface([ 5], 5) 119 | gmsh.model.geo.addPlaneSurface([ 6], 6) 120 | gmsh.model.geo.addPlaneSurface([ 7], 7) 121 | gmsh.model.geo.addPlaneSurface([ 8], 8) 122 | gmsh.model.geo.addPlaneSurface([ 9], 9) 123 | gmsh.model.geo.addPlaneSurface([10], 10) 124 | gmsh.model.geo.addPlaneSurface([11], 11) 125 | gmsh.model.geo.addPlaneSurface([12], 12) 126 | gmsh.model.geo.addPlaneSurface([13], 13) 127 | gmsh.model.geo.addPlaneSurface([14], 14) 128 | gmsh.model.geo.addPlaneSurface([15], 15) 129 | gmsh.model.geo.addPlaneSurface([16], 16) 130 | gmsh.model.geo.addPlaneSurface([17], 17) 131 | gmsh.model.geo.addPlaneSurface([18], 18) 132 | #gmsh.model.geo.addPlaneSurface([19], 19) 133 | # Physical groups 134 | gmsh.model.addPhysicalGroup(0, [1,3,21,25], 1) 135 | gmsh.model.setPhysicalName(0, 1, "LeftNodes") 136 | gmsh.model.addPhysicalGroup(0, [7,8,24,28], 2) 137 | gmsh.model.setPhysicalName(0, 2, "RightNodes") 138 | gmsh.model.addPhysicalGroup(0, [2,5], 3) 139 | gmsh.model.setPhysicalName(0, 3, "BottomNodes") 140 | gmsh.model.addPhysicalGroup(0, [26,27], 4) 141 | gmsh.model.setPhysicalName(0, 4, "TopNodes") 142 | gmsh.model.addPhysicalGroup(0, [13,17], 5) 143 | gmsh.model.setPhysicalName(0, 5, "LeftDNodes") 144 | gmsh.model.addPhysicalGroup(0, [16,20], 6) 145 | gmsh.model.setPhysicalName(0, 6, "RightDNodes") 146 | gmsh.model.addPhysicalGroup(0, [14,15,18,19], 7) 147 | gmsh.model.setPhysicalName(0, 7, "TBDNodes") 148 | gmsh.model.addPhysicalGroup(0, [4,6,22,23], 8) 149 | gmsh.model.setPhysicalName(0, 8, "InnerNodes") 150 | gmsh.model.addPhysicalGroup(0, [9,10,11,12], 9) 151 | gmsh.model.setPhysicalName(0, 9, "SourceNodes") 152 | 153 | gmsh.model.addPhysicalGroup(1, [4,13,20,34,41], 10) 154 | gmsh.model.setPhysicalName(1, 10, "LeftEdge") 155 | gmsh.model.addPhysicalGroup(1, [9,16,23,37,44], 11) 156 | gmsh.model.setPhysicalName(1, 11, "RightEdge") 157 | gmsh.model.addPhysicalGroup(1, [1,5,8], 12) 158 | gmsh.model.setPhysicalName(1, 12, "BottomEdge") 159 | gmsh.model.addPhysicalGroup(1, [40,43,45], 13) 160 | gmsh.model.setPhysicalName(1, 13, "TopEdge") 161 | gmsh.model.addPhysicalGroup(1, [27], 14) 162 | gmsh.model.setPhysicalName(1, 14, "LeftDEdge") 163 | gmsh.model.addPhysicalGroup(1, [30], 15) 164 | gmsh.model.setPhysicalName(1, 15, "RightDEdge") 165 | gmsh.model.addPhysicalGroup(1, [19,22,24,26,29,31], 16) 166 | gmsh.model.setPhysicalName(1, 16, "TBDEdge") 167 | gmsh.model.addPhysicalGroup(1, [2,3,6,7,10,11,14,18,21,25,28,32,33,35,36,38,39,42], 17) 168 | gmsh.model.setPhysicalName(1, 17, "InnerEdge") 169 | gmsh.model.addPhysicalGroup(1, [46,47,48,49], 18) 170 | gmsh.model.setPhysicalName(1, 18, "TargetEdge") 171 | gmsh.model.addPhysicalGroup(1, [12,15,17], 19) 172 | gmsh.model.setPhysicalName(1, 19, "SourceEdge") 173 | 174 | gmsh.model.addPhysicalGroup(2, [10,11,12], 20) 175 | gmsh.model.setPhysicalName(2, 20, "Design") 176 | gmsh.model.addPhysicalGroup(2, [16,17,18], 21) 177 | gmsh.model.setPhysicalName(2, 21, "TopPML") 178 | gmsh.model.addPhysicalGroup(2, 
[1,2,3], 22) 179 | gmsh.model.setPhysicalName(2, 22, "BottomPML") 180 | gmsh.model.addPhysicalGroup(2, [14], 23) 181 | gmsh.model.setPhysicalName(2, 23, "Target") 182 | gmsh.model.addPhysicalGroup(2, [4,5,6,7,8,9], 24) 183 | gmsh.model.setPhysicalName(2, 24, "Incident") 184 | gmsh.model.addPhysicalGroup(2, [13,15], 25) 185 | gmsh.model.setPhysicalName(2, 25, "LRTarget") 186 | gmsh.model.geo.synchronize() 187 | gmsh.model.mesh.generate(2) 188 | 189 | # ... and save it to disk 190 | gmsh.write("geometry.msh") 191 | gmsh.finalize() 192 | end -------------------------------------------------------------------------------- /FEM/Module/Model.jl: -------------------------------------------------------------------------------- 1 | # Define the theoretic model 2 | ################### Assemble matrix and vector ##################### 3 | # Weak form of the Helmholtz equation : 4 | # Hz: a(p,u,v)=Λ⋅∇v⋅ξ(p)Λ⋅∇u-k²μv⋅u 5 | # Material distribution 6 | #ξ0(x,ϵ1,ϵ2) = x[2]>0 ? 1/ϵ1 : 1/ϵ2 7 | #ξd(p,ϵmin,ϵmax)= 1/(ϵmin + (ϵmax-ϵmin)*p) - 1/ϵmin # in the design region 8 | #a0(u,v,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml) = ((x->Λ(x,σs,k,LHp,LHn,dpml))⋅∇(v))⊙((x->ξ0(x,ϵ1,ϵ2))*((x->Λ(x,σs,k,LHp,LHn,dpml))⋅∇(u))) - k^2*μ*(v*u) 9 | #a_d(u,v,ph,k,ϵ1,ϵd) = ∇(v)⊙(((p -> ξd(p, ϵ1, ϵd))∘ph)*∇(u)) 10 | # Ez: a(p,u,v)=1/μ*Λ⋅∇v⋅Λ⋅∇u-k²ξ(p)uv 11 | ξd(p,ϵmin,ϵmax)= ϵmin+(ϵmax-ϵmin)*p # in the design region 12 | a0(u,v,ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,Hd,dpml) = ((x->Λ(x,σs,Hd,k,LHp,LHn,dpml))⋅∇(v))⊙((1/μ)*((x->Λ(x,σs,Hd,k,LHp,LHn,dpml))⋅∇(u))) - k^2*(((p -> ξd(p, ϵ1, ϵ2))∘ph)*(v*u)) 13 | #a_d(u,v,ph,k,ϵ1,ϵd) = -k^2*((p -> ξd(p, ϵ1, ϵd))∘ph)*(v*u) 14 | # Source term (Gaussian point source approximation at center) 15 | 16 | #b(v,x0,δ) = v*(x->GaussianD(x,x0,δ)) 17 | 18 | 19 | function MatrixA(ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,Hd,U,V,dΩ) 20 | # Assemble the matrix 21 | return assemble_matrix(U,V) do u,v 22 | ∫( a0(u,v,ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,Hd,dpml) )dΩ 23 | end 24 | end 25 | 26 | function MatrixB(x0,δ,Amp,V,dΩ,dΓ) 27 | l_temp(v) = ∫( Amp*v*x->GaussianY(x,x0,δ) )*dΩ+∫( 0*v )*dΓ 28 | #op = AffineFEOperator((u,v)->(∫(u*v)*dΩ),l_temp,U,V) 29 | #op = FESource(l_temp,U,V) 30 | assemble_vector(l_temp,V) 31 | end 32 | -------------------------------------------------------------------------------- /FEM/Module/Objective.jl: -------------------------------------------------------------------------------- 1 | #g=g_u(uvec) 2 | f_target(x,Amp,h1,Lt,Ht) = Amp^2*((abs(x[1])Threshold(β,η,flag_t,pf))∘pfh 18 | A_mat = MatrixA(ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ) 19 | B_vec = MatrixB(x0,δ,2*π*Amp,V,dΩ,dΓ) 20 | u_vec = A_mat\B_vec 21 | u_vec 22 | end 23 | 24 | #pf = pf_p(p) 25 | function pf_p(p;r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag) 26 | pvec = p_vec(p,P,tags,design_tag) 27 | pf = Filter(pvec,r,flag_f,P,Pf,Qf,dΩ,dΓ_d) 28 | pf 29 | end 30 | # Chain Rule : dg/dp = dg/dg*dg/du*du/dpf*dpf/dp 31 | # dg/du=dg/dg*dg/du 32 | function rrule(::typeof(g_u),u_vec;Amp,h1,Lt,Ht,U,V,dΩ_t) 33 | function g_pullback(dgdg) 34 | NO_FIELDS, dgdg*Dgdu(u_vec,Amp,h1,Lt,Ht,U,V,dΩ_t) 35 | end 36 | g_u(u_vec;Amp,h1,Lt,Ht,U,V,dΩ_t), g_pullback 37 | end 38 | 39 | function Dgdu(u_vec,Amp,h1,Lt,Ht,U,V,dΩ_t) 40 | It(x) = f_target(x,Amp,h1,Lt,Ht) 41 | uh_t = FEFunction(U,u_vec) 42 | l_temp(du)=∫(4*uh_t*(abs2(uh_t)-It)*du)dΩ_t 43 | assemble_vector(l_temp,V) 44 | end 45 | 46 | # dg/dpf=dg/du*du/dpf 47 | function rrule(::typeof(u_pf),pf;x0,δ,Amp,P,Pf,β,η,flag_t,flag_f,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ,dΓ) 48 | u_vec = u_pf(pf;x0,δ,Amp,P,Pf,β,η,flag_t,flag_f,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ,dΓ) 
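    # Adjoint step: the pullback defined below maps dg/du back to dg/dpf via Dgdpf,
    # which re-assembles the Helmholtz matrix A(p), solves the adjoint system A'λ = dg/du,
    # and assembles ∫ real(k²·(∂ξ/∂pf)·u·λ) dp dΩ, so each gradient evaluation costs a
    # single extra linear solve, independent of the number of design degrees of freedom.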
49 | function u_pullback(dgdu) 50 | NO_FIELDS, Dgdpf(dgdu,u_vec,pf,P,Pf,β,η,flag_t,flag_f,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ) 51 | end 52 | u_vec, u_pullback 53 | end 54 | 55 | Dξdp(pf,ϵmin,ϵmax,β,η,flag_t)=(ϵmax-ϵmin)*(!flag_t+flag_t*β*(1.0-tanh(β*(pf-η))^2)/(tanh(β*η)+tanh(β*(1.0-η)))) 56 | dG(pfh,u,v,dp,ϵmin,ϵmax,k,β,η,flag_t) = real(k^2*((pf->Dξdp(pf,ϵmin,ϵmax,β,η,flag_t))∘pfh)*(v*u)*dp) 57 | 58 | function Dgdpf(dgdu,u_vec,pf,P,Pf,β,η,flag_t,flag_f,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ) 59 | if (flag_f) 60 | pfh = FEFunction(Pf,pf) 61 | ph = (pf->Threshold(β,η,flag_t,pf))∘pfh 62 | A_mat = MatrixA(ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ) 63 | λ_vec = A_mat'\dgdu 64 | 65 | uh = FEFunction(U,u_vec) 66 | λh = FEFunction(V,conj(λ_vec)) 67 | l_temp(dp) = ∫(dG(pfh,uh,λh,dp,ϵ1,ϵ2,k,β,η,flag_t))*dΩ 68 | dgdpf = assemble_vector(l_temp,Pf) 69 | return dgdpf 70 | else 71 | pfh = FEFunction(P,pf) 72 | ph = (pf->Threshold(β,η,flag_t,pf))∘pfh 73 | A_mat = MatrixA(ph,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ) 74 | λ_vec = A_mat'\dgdu 75 | 76 | uh = FEFunction(U,u_vec) 77 | λh = FEFunction(V,conj(λ_vec)) 78 | l_temp(dp) = ∫(dG(pfh,uh,λh,dp,ϵ1,ϵ2,k,β,η,flag_t))*dΩ 79 | dgdpf = assemble_vector(l_temp,P) 80 | return dgdpf 81 | end 82 | end 83 | 84 | # dg/dp=dg/dpf*dpf/dp 85 | function rrule(::typeof(pf_p),p;r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag) 86 | function pf_pullback(dgdpf) 87 | NO_FIELDS, Dgdp(dgdpf,p,r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag) 88 | end 89 | pf_p(p;r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag), pf_pullback 90 | end 91 | 92 | function Dgdp(dgdpf,p,r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag) 93 | np = length(p) 94 | if (flag_f) 95 | A = assemble_matrix(Pf,Qf) do u, v 96 | ∫( a_f(r,u,v))dΩ 97 | end 98 | λvec = A'\dgdpf 99 | λh = FEFunction(Pf,λvec) 100 | l_temp(dp) = ∫(λh*dp)*dΩ 101 | return extract_design(assemble_vector(l_temp,P),np,tags,design_tag) 102 | else 103 | return extract_design(dgdpf,np,tags,design_tag) 104 | end 105 | end 106 | 107 | # Final objective function 108 | function g_p(p::Vector;x0,δ,Amp,r,flag_f,P,Pf,Qf,β,η,flag_t, 109 | ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,dΩ,dΓ,dΩ_t,dΓ_d,tags,design_tag) 110 | pf = pf_p(p;r,flag_f,P,Pf,Qf,dΩ,dΓ_d,tags,design_tag) 111 | u_vec=u_pf(pf;x0,δ,Amp,P,Pf,β,η,flag_t,flag_f,ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,U,V,dΩ,dΓ) 112 | g_u(u_vec;Amp,h1,Lt,Ht,U,V,dΩ_t) 113 | end 114 | 115 | function g_p(p::Vector,grad::Vector;x0,δ,Amp,r,flag_f,P,Pf,Qf,β,η,flag_t, 116 | ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,dΩ,dΓ,dΩ_t,dΓ_d,tags,design_tag) 117 | if length(grad) > 0 118 | dgdp, = Zygote.gradient(p->g_p(p;x0,δ,Amp,r,flag_f,P,Pf,Qf,β,η,flag_t, 119 | ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,dΩ,dΓ,dΩ_t,dΓ_d,tags,design_tag),p) 120 | grad[:] = dgdp 121 | end 122 | g_value = g_p(p;x0,δ,Amp,r,flag_f,P,Pf,Qf,β,η,flag_t, 123 | ϵ1,ϵ2,μ,σs,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,dΩ,dΓ,dΩ_t,dΓ_d,tags,design_tag) 124 | @show g_value 125 | return g_value 126 | end -------------------------------------------------------------------------------- /FEM/Module/PINN.jl: -------------------------------------------------------------------------------- 1 | module PINN 2 | 3 | using Gmsh 4 | using Gridap 5 | #using GridapGmsh 6 | using SparseArrays 7 | #using Gridap.Geometry 8 | using ChainRules 9 | using Zygote 10 | using LinearAlgebra 11 | import ChainRules: rrule 12 | import Gmsh: gmsh 13 | 14 | export MeshGenerator 15 | export MatrixA 16 | export MatrixB 17 | export pf_p 18 | export u_pf 19 | export g_u 20 | export g_p 21 | export Threshold 22 | export p_vec 23 | export extract_design 24 
| export Filter 25 | export a_f 26 | 27 | include("FilterAndThreshold.jl") 28 | include("Helper.jl") 29 | include("MeshGenerator.jl") 30 | include("PML.jl") 31 | include("Objective.jl") 32 | include("Model.jl") 33 | 34 | end -------------------------------------------------------------------------------- /FEM/Module/PML.jl: -------------------------------------------------------------------------------- 1 | # PML coordinate streching functions 2 | function s_PML(x,σs,Hd,k,LHp,LHn,dpml) 3 | σ = (x[2]<0)&&(x[2]+Hd>0) ? σs[2] : σs[1] 4 | xf = [x[1] x[2]] 5 | u = @. ifelse(xf>0 , xf-LHp , -xf-LHn) 6 | return @. ifelse(u > 0, 1-(1im*σ/k)*(u/dpml)^2, $(1.0+0im)) 7 | end 8 | 9 | function Λ(x,σs,Hd,k,LHp,LHn,dpml) 10 | s_x,s_y = s_PML(x,σs,Hd,k,LHp,LHn,dpml) 11 | return TensorValue(1/s_x,0,0,1/s_y) 12 | end -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hPINN: Physics-informed neural networks with hard constraints 2 | 3 | The source code for the paper [L. Lu, R. Pestourie, W. Yao, Z. Wang, F. Verdugo, & S. G. Johnson. Physics-informed neural networks with hard constraints for inverse design. *SIAM Journal on Scientific Computing*, 43(6), B1105-B1132, 2021](https://doi.org/10.1137/21M1397908). 
4 | 5 | ## Code 6 | 7 | The code depends on the deep learning package [DeepXDE](https://github.com/lululxvi/deepxde) v0.9.1. If you want to use the latest DeepXDE, you need to modify the code. 8 | 9 | ### Holography 10 | 11 | - [hPINN](holography) 12 | - FDFD: [Jupyter Notebook](FDFD/inverse_design_FDFD-epsstart-eps1.ipynb) 13 | - FEM: [Jupyter Notebook](FEM/Main.ipynb) 14 | 15 | ### Fluids in Stokes flow 16 | 17 | - [hPINN](stokes/stokes.py) 18 | 19 | ## Cite this work 20 | 21 | If you use this code for academic research, you are encouraged to cite the following paper: 22 | 23 | ``` 24 | @article{lu2021physics, 25 | author = {Lu, Lu and Pestourie, Raphael and Yao, Wenjie and Wang, Zhicheng and Verdugo, Francesc and Johnson, Steven G}, 26 | title = {Physics-informed neural networks with hard constraints for inverse design}, 27 | journal = {SIAM Journal on Scientific Computing}, 28 | volume = {43}, 29 | number = {6}, 30 | pages = {B1105-B1132}, 31 | year = {2021}, 32 | doi = {10.1137/21M1397908} 33 | } 34 | ``` 35 | 36 | ## Questions 37 | 38 | To get help on how to use the code, simply open an issue in the GitHub "Issues" section. 39 | -------------------------------------------------------------------------------- /holography/holography/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lululxvi/hpinn/1a59c28dc52af26d719b6a6b2adf1f12f552fae4/holography/holography/__init__.py -------------------------------------------------------------------------------- /holography/holography/bc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from deepxde.backend import tf 3 | 4 | from .config import BOX, DPML, OMEGA 5 | 6 | 7 | # def interface12(x, _): 8 | # return np.isclose(x[1], -1) 9 | 10 | 11 | # def interface23(x, _): 12 | # return np.isclose(x[1], 0) 13 | 14 | 15 | # def interface12_ReE(inputs, outputs, X): 16 | # return getE(outputs, 1, "Re") - getE(outputs, 2, "Re") 17 | 18 | 19 | # def interface12_ImE(inputs, outputs, X): 20 | # return getE(outputs, 1, "Im") - getE(outputs, 2, "Im") 21 | 22 | 23 | # def interface12_dReE(inputs, outputs, X): 24 | # return dde.grad.jacobian( 25 | # outputs, inputs, i=getEcomp(1, "Re"), j=1 26 | # ) - dde.grad.jacobian(outputs, inputs, i=getEcomp(2, "Re"), j=1) 27 | 28 | 29 | # def interface12_dImE(inputs, outputs, X): 30 | # return dde.grad.jacobian( 31 | # outputs, inputs, i=getEcomp(1, "Im"), j=1 32 | # ) - dde.grad.jacobian(outputs, inputs, i=getEcomp(2, "Im"), j=1) 33 | 34 | 35 | # def interface23_ReE(inputs, outputs, X): 36 | # return getE(outputs, 2, "Re") - getE(outputs, 3, "Re") 37 | 38 | 39 | # def interface23_ImE(inputs, outputs, X): 40 | # return getE(outputs, 2, "Im") - getE(outputs, 3, "Im") 41 | 42 | 43 | # def interface23_dReE(inputs, outputs, X): 44 | # return dde.grad.jacobian( 45 | # outputs, inputs, i=getEcomp(2, "Re"), j=1 46 | # ) - dde.grad.jacobian(outputs, inputs, i=getEcomp(3, "Re"), j=1) 47 | 48 | 49 | # def interface23_dImE(inputs, outputs, X): 50 | # return dde.grad.jacobian( 51 | # outputs, inputs, i=getEcomp(2, "Im"), j=1 52 | # ) - dde.grad.jacobian(outputs, inputs, i=getEcomp(3, "Im"), j=1) 53 | 54 | 55 | # def boundary1_leftright(x, on_boundary): 56 | # return x[1] <= -1 and np.isclose(abs(x[0]), 6) 57 | 58 | 59 | # def boundary1_bottom(x, on_boundary): 60 | # return np.isclose(x[1], -3) 61 | 62 | 63 | # def boundary2(x, on_boundary): 64 | # return on_boundary and -1 <= x[1] <= 0 65 | 66 | 67 
| # def boundary3_leftright(x, on_boundary): 68 | # return x[1] >= 0 and np.isclose(abs(x[0]), 6) 69 | 70 | 71 | # def boundary3_top(x, on_boundary): 72 | # return np.isclose(x[1], 6) 73 | 74 | 75 | def feature_transform(inputs): 76 | # Periodic BC in x 77 | P = BOX[1][0] - BOX[0][0] + 2 * DPML 78 | w = 2 * np.pi / P 79 | x, y = w * inputs[:, :1], inputs[:, 1:] 80 | return tf.concat( 81 | ( 82 | tf.math.cos(x), 83 | tf.math.sin(x), 84 | tf.math.cos(2 * x), 85 | tf.math.sin(2 * x), 86 | tf.math.cos(3 * x), 87 | tf.math.sin(3 * x), 88 | tf.math.cos(4 * x), 89 | tf.math.sin(4 * x), 90 | tf.math.cos(5 * x), 91 | tf.math.sin(5 * x), 92 | tf.math.cos(6 * x), 93 | tf.math.sin(6 * x), 94 | # tf.math.cos(7 * x), 95 | # tf.math.sin(7 * x), 96 | # tf.math.cos(8 * x), 97 | # tf.math.sin(8 * x), 98 | # tf.math.cos(9 * x), 99 | # tf.math.sin(9 * x), 100 | # tf.math.cos(10 * x), 101 | # tf.math.sin(10 * x), 102 | y, 103 | tf.math.cos(OMEGA * y), 104 | tf.math.sin(OMEGA * y), 105 | ), 106 | axis=1, 107 | ) 108 | 109 | 110 | def output_transform(inputs, outputs): 111 | x, y = inputs[:, :1], inputs[:, 1:] 112 | 113 | # 1 <= eps <= 12 114 | eps = 1 + 11 * tf.math.sigmoid(outputs[:, -1:]) 115 | 116 | # Zero Dirichlet BC 117 | a, b = BOX[0][1] - DPML, BOX[1][1] + DPML 118 | E = (1 - tf.math.exp(a - y)) * (1 - tf.math.exp(y - b)) * outputs[:, :2] 119 | 120 | # Zero Dirichlet and Neumann BC 121 | # a, b = BOX[0][1] - DPML, BOX[1][1] + DPML 122 | # E = 0.01 * (a - y) ** 2 * (y - b) ** 2 * outputs[:, :2] 123 | 124 | # return E 125 | return tf.concat((E, eps), axis=1) 126 | 127 | 128 | # def output_transform(inputs, outputs): 129 | # a, b = -3, 6 130 | # y = inputs[:, 1:] 131 | # E1 = (1 - tf.math.exp(a - y)) * outputs[:, :2] 132 | # E2 = outputs[:, 2:4] 133 | # E3 = (1 - tf.math.exp(y - b)) * outputs[:, 4:6] 134 | # eps = 1 + 11 * tf.math.sigmoid(outputs[:, -1:]) 135 | # # return tf.concat([outputs[:, :-1], eps], 1) 136 | # return tf.concat((E1, E2, E3, eps), 1) 137 | 138 | 139 | # def output_transform(inputs, outputs): 140 | # x, y = inputs[:, :1], inputs[:, 1:] 141 | 142 | # # 1 <= eps <= 12 143 | # eps = 1 + 11 * tf.math.sigmoid(outputs[:, -1:]) 144 | 145 | # # Continuity of E and dE/dy 146 | # P = 12 147 | # c, d = -1, 0 148 | # w = 2 * np.pi / P 149 | # e = tf.concat( 150 | # ( 151 | # tf.math.cos(w * x), 152 | # tf.math.sin(w * x), 153 | # tf.math.cos(2 * w * x), 154 | # tf.math.sin(2 * w * x), 155 | # ), 156 | # axis=1, 157 | # ) 158 | # units = 16 159 | # ReN1 = tf.keras.layers.Dense(units, activation="tanh")(e) 160 | # ReN1 = tf.keras.layers.Dense(1)(ReN1) * 0.1 161 | # ReN2 = tf.keras.layers.Dense(units, activation="tanh")(e) 162 | # ReN2 = tf.keras.layers.Dense(1)(ReN2) * 0.1 163 | # ImN1 = tf.keras.layers.Dense(units, activation="tanh")(e) 164 | # ImN1 = tf.keras.layers.Dense(1)(ImN1) * 0.1 165 | # ImN2 = tf.keras.layers.Dense(units, activation="tanh")(e) 166 | # ImN2 = tf.keras.layers.Dense(1)(ImN2) * 0.1 167 | # y1 = y - c 168 | # y1 = y1 * tf.math.abs(y1) 169 | # y2 = y - d 170 | # y2 = y2 * tf.math.abs(y2) 171 | # ReE = outputs[:, :1] + ReN1 * y1 + ReN2 * y2 172 | # ImE = outputs[:, 1:2] + ImN1 * y1 + ImN2 * y2 173 | # E = tf.concat((ReE, ImE), axis=1) 174 | 175 | # # Zero Dirichlet BC 176 | # a, b = -3, 6 177 | # E = (1 - tf.math.exp(a - y)) * (1 - tf.math.exp(y - b)) * E 178 | # return tf.concat((E, eps), axis=1) 179 | -------------------------------------------------------------------------------- /holography/holography/config.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import deepxde as dde 3 | 4 | BOX = np.array([[-2, -2], [2, 3]]) 5 | DPML = 1 6 | 7 | OMEGA = 2 * np.pi 8 | 9 | SIGMA0 = -np.log(1e-20) / (4 * DPML ** 3 / 3) 10 | 11 | 12 | def J(x): 13 | # Approximate the delta function 14 | y = x[:, 1:] + 1.5 15 | # hat function of width 2 * h 16 | # h = 0.5 17 | # return 1 / h * np.maximum(1 - np.abs(y / h), 0) 18 | # normal distribution of width ~2 * 2.5h 19 | h = 0.2 20 | return 1 / (h * np.pi ** 0.5) * np.exp(-((y / h) ** 2)) * (np.abs(y) < 0.5) 21 | # constant function of width 2 * h 22 | # h = 0.25 23 | # return 1 / (2 * h) * (np.abs(y) < h) 24 | 25 | 26 | def target_square(X): 27 | f1 = np.heaviside((X[:, :1] + 0.5) * (0.5 - X[:, :1]), 0.5) 28 | f2 = np.heaviside((X[:, 1:] - 1) * (2 - X[:, 1:]), 0.5) 29 | return f1 * f2 30 | 31 | 32 | def target_ring(X): 33 | d = np.linalg.norm(X - np.array([0, 1.5]), axis=1, keepdims=True) 34 | return np.heaviside((d - 0.9) * (1 - d), 0.5) 35 | 36 | 37 | def target_face(X): 38 | c = dde.geometry.Disk([0, 1.5], 1) - dde.geometry.Disk([0, 1.5], 0.9) 39 | eye1 = dde.geometry.Disk([-0.5, 1.8], 0.1) 40 | eye2 = dde.geometry.Disk([0.5, 1.8], 0.1) 41 | mouth = dde.geometry.Disk([0, 1.3], 0.4) & dde.geometry.Rectangle( 42 | [-0.4, 0], [0.4, 1.3] 43 | ) 44 | face = c | eye1 | eye2 | mouth 45 | return np.array([int(face.inside(x)) for x in X])[:, None] 46 | 47 | 48 | target = target_square 49 | 50 | 51 | def getEcomp(i, part): 52 | if part not in ["Re", "Im"]: 53 | raise ValueError(f"part={part}") 54 | # if i == 1: 55 | # idx = 0 if part == "Re" else 1 56 | # elif i == 2: 57 | # idx = 2 if part == "Re" else 3 58 | # elif i == 3: 59 | # idx = 4 if part == "Re" else 5 60 | idx = 0 if part == "Re" else 1 61 | return idx 62 | 63 | 64 | def getE(outputs, i, part): 65 | idx = getEcomp(i, part) 66 | return outputs[:, idx : idx + 1] 67 | -------------------------------------------------------------------------------- /holography/holography/pde.py: -------------------------------------------------------------------------------- 1 | import deepxde as dde 2 | import numpy as np 3 | 4 | from .config import BOX, J, OMEGA, SIGMA0 5 | 6 | 7 | def PML(X): 8 | def sigma(x, a, b): 9 | """sigma(x) = 0 if a < x < b, else grows quadratically from zero. 
10 | """ 11 | 12 | def _sigma(d): 13 | return SIGMA0 * d ** 2 * np.heaviside(d, 0) 14 | 15 | return _sigma(a - x) + _sigma(x - b) 16 | 17 | def dsigma(x, a, b): 18 | def _sigma(d): 19 | return 2 * SIGMA0 * d * np.heaviside(d, 0) 20 | 21 | return -_sigma(a - x) + _sigma(x - b) 22 | 23 | sigma_x = sigma(X[:, :1], BOX[0][0], BOX[1][0]) 24 | AB1 = 1 / (1 + 1j / OMEGA * sigma_x) ** 2 25 | A1, B1 = AB1.real, AB1.imag 26 | 27 | dsigma_x = dsigma(X[:, :1], BOX[0][0], BOX[1][0]) 28 | AB2 = -1j / OMEGA * dsigma_x * AB1 / (1 + 1j / OMEGA * sigma_x) 29 | A2, B2 = AB2.real, AB2.imag 30 | 31 | sigma_y = sigma(X[:, 1:], BOX[0][1], BOX[1][1]) 32 | AB3 = 1 / (1 + 1j / OMEGA * sigma_y) ** 2 33 | A3, B3 = AB3.real, AB3.imag 34 | 35 | dsigma_y = dsigma(X[:, 1:], BOX[0][1], BOX[1][1]) 36 | AB4 = -1j / OMEGA * dsigma_y * AB3 / (1 + 1j / OMEGA * sigma_y) 37 | A4, B4 = AB4.real, AB4.imag 38 | return A1, B1, A2, B2, A3, B3, A4, B4 39 | 40 | 41 | def pde(inputs, outputs, X, ReE, ImE, eps): 42 | A1, B1, A2, B2, A3, B3, A4, B4 = PML(X) 43 | 44 | dReE_x = dde.grad.jacobian(outputs, inputs, i=ReE, j=0) 45 | dReE_y = dde.grad.jacobian(outputs, inputs, i=ReE, j=1) 46 | dReE_xx = dde.grad.hessian(outputs, inputs, component=ReE, i=0, j=0) 47 | dReE_yy = dde.grad.hessian(outputs, inputs, component=ReE, i=1, j=1) 48 | dImE_x = dde.grad.jacobian(outputs, inputs, i=ImE, j=0) 49 | dImE_y = dde.grad.jacobian(outputs, inputs, i=ImE, j=1) 50 | dImE_xx = dde.grad.hessian(outputs, inputs, component=ImE, i=0, j=0) 51 | dImE_yy = dde.grad.hessian(outputs, inputs, component=ImE, i=1, j=1) 52 | 53 | ReE = outputs[:, ReE : ReE + 1] 54 | ImE = outputs[:, ImE : ImE + 1] 55 | 56 | loss_Re = ( 57 | (A1 * dReE_xx + A2 * dReE_x + A3 * dReE_yy + A4 * dReE_y) / OMEGA 58 | - (B1 * dImE_xx + B2 * dImE_x + B3 * dImE_yy + B4 * dImE_y) / OMEGA 59 | + eps * OMEGA * ReE 60 | ) 61 | loss_Im = ( 62 | (A1 * dImE_xx + A2 * dImE_x + A3 * dImE_yy + A4 * dImE_y) / OMEGA 63 | + (B1 * dReE_xx + B2 * dReE_x + B3 * dReE_yy + B4 * dReE_y) / OMEGA 64 | + eps * OMEGA * ImE 65 | + J(X) 66 | ) 67 | # return loss_Re, loss_Im 68 | return loss_Re, loss_Im, loss_Re, loss_Im # augmented_Lagrangian 69 | 70 | 71 | def pde_domain(inputs, outputs, X): 72 | condition = np.logical_and(X[:, 1:] < 0, X[:, 1:] > -1).astype(np.float32) 73 | eps = outputs[:, -1:] * condition + 1 - condition 74 | # eps = 1 75 | return pde(inputs, outputs, X, 0, 1, eps) 76 | 77 | 78 | # def pde_bc(outputs, inputs, ReE, ImE, X, eps=1): 79 | # loss_Re, loss_Im = pde(inputs, outputs, X, ReE, ImE, eps) 80 | # return tf.concat([loss_Re, loss_Im], axis=1) 81 | 82 | 83 | # def pde_E1_ReIm(inputs, outputs, X): 84 | # return pde_bc(outputs, inputs, getEcomp(1, "Re"), getEcomp(1, "Im"), X) 85 | 86 | 87 | # def pde_E2_ReIm(inputs, outputs, X): 88 | # return pde_bc( 89 | # outputs, inputs, getEcomp(2, "Re"), getEcomp(2, "Im"), X, eps=outputs[:, -1:] 90 | # ) 91 | 92 | 93 | # def pde_E3_ReIm(inputs, outputs, X): 94 | # return pde_bc(outputs, inputs, getEcomp(3, "Re"), getEcomp(3, "Im"), X) 95 | -------------------------------------------------------------------------------- /holography/holography/target.py: -------------------------------------------------------------------------------- 1 | import deepxde as dde 2 | 3 | from .config import getE, target 4 | 5 | 6 | def target_bc(inputs, outputs, X): 7 | return getE(outputs, 3, "Re") ** 2 + getE(outputs, 3, "Im") ** 2 - target(X) 8 | 9 | 10 | class TargetErr(dde.callbacks.Callback): 11 | def __init__(self, geom, period=1, filename=None): 12 | super(TargetErr, 
self).__init__() 13 | self.period = period 14 | 15 | self.x = geom.uniform_points(50000) 16 | self.y_true = target(self.x) 17 | self.file = sys.stdout if filename is None else open(filename, "w", buffering=1) 18 | self.value = None 19 | self.epochs_since_last = 0 20 | 21 | def init(self): 22 | self.feed_dict = self.model.net.feed_dict(False, False, 2, self.x) 23 | 24 | def on_train_begin(self): 25 | y_pred = self.model.sess.run(self.model.net.outputs, feed_dict=self.feed_dict) 26 | y_pred = (y_pred[:, 0] ** 2 + y_pred[:, 1] ** 2) ** 0.5 27 | self.value = np.mean((self.y_true - y_pred) ** 2) 28 | print( 29 | self.model.train_state.epoch, self.value, file=self.file, 30 | ) 31 | self.file.flush() 32 | 33 | def on_train_end(self): 34 | self.on_train_begin() 35 | 36 | def on_epoch_end(self): 37 | self.epochs_since_last += 1 38 | if self.epochs_since_last >= self.period: 39 | self.epochs_since_last = 0 40 | self.on_train_begin() 41 | 42 | 43 | # def target1(inputs, outputs, X): 44 | # return getE(outputs, 3, "Re") ** 2 + getE(outputs, 3, "Im") ** 2 - 1 45 | 46 | 47 | # def target0(inputs, outputs, X): 48 | # return getE(outputs, 3, "Re") ** 2 + getE(outputs, 3, "Im") ** 2 49 | -------------------------------------------------------------------------------- /holography/holography_main.py: -------------------------------------------------------------------------------- 1 | from scipy.interpolate import griddata 2 | 3 | import deepxde as dde 4 | import numpy as np 5 | from deepxde.backend import tf 6 | 7 | # tf.random.set_random_seed(1234) 8 | 9 | from holography.bc import feature_transform, output_transform 10 | from holography.config import BOX, DPML, J 11 | from holography.pde import pde_domain 12 | from holography.target import target_bc 13 | 14 | 15 | def l2_relative_error_1(y_true, y_pred): 16 | return dde.metrics.nanl2_relative_error(y_true[:, 0], y_pred[:, 0]) 17 | 18 | 19 | def l2_relative_error_2(y_true, y_pred): 20 | return dde.metrics.nanl2_relative_error(y_true[:, 1], y_pred[:, 1]) 21 | 22 | 23 | def solution_forward(x): 24 | # solution for normal distribution J 25 | d = np.loadtxt("FDTD.dat") 26 | ReE = griddata(d[:, :2], d[:, 2], x) 27 | ImE = griddata(d[:, :2], d[:, 3], x) 28 | return np.vstack((ReE, ImE)).T 29 | 30 | 31 | def save_solution(geom, model, filename_E, filename_residual): 32 | # x_center = np.vstack( 33 | # (np.full(1000, 0), np.linspace(BOX[0][1] - DPML, BOX[1][1] + DPML, num=1000)) 34 | # ).T 35 | # y_pred = model.predict(x_center) 36 | # print("Saving E_x0.dat ...") 37 | # np.savetxt("E_x0.dat", np.hstack((x_center, y_pred))) 38 | # print("Saving J.dat ...") 39 | # np.savetxt("J.dat", np.hstack((x_center, J(x_center)))) 40 | 41 | x = geom.uniform_points(50000) 42 | y_pred = model.predict(x) 43 | print("Saving E ...\n") 44 | np.savetxt(filename_E, np.hstack((x, y_pred[:, :2]))) 45 | 46 | # residual_Re, residual_Im = model.predict(x, operator=pde_domain) 47 | residual_Re, residual_Im, _, _ = model.predict(x, operator=pde_domain) 48 | print("Saving residual ...\n") 49 | np.savetxt(filename_residual, np.hstack((x, residual_Re, residual_Im))) 50 | 51 | 52 | def save_epsilon(geom, model, filename): 53 | x = geom.uniform_points(60000) 54 | y_pred = model.predict(x) 55 | print("Saving epsilon ...\n") 56 | np.savetxt(filename, np.hstack((x, y_pred[:, -1:]))) 57 | 58 | 59 | def penalty(model, geom2, mu, beta): 60 | i = 0 61 | while mu < 100: 62 | i += 1 63 | mu *= beta 64 | print("-" * 80) 65 | print(f"Iteration {i}: mu = {mu}\n") 66 | 67 | loss_weights = [0.5 * mu] * 2 + [1] 
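# Penalty formulation: with the weighting above, the two PDE residual losses (Re and Im)
# are multiplied by mu/2, the target loss keeps weight 1, and mu grows by the factor beta
# each outer iteration until it reaches 100. Note that these three weights assume
# pde_domain returns only two residuals (the commented-out "return loss_Re, loss_Im" in
# pde.py); with the current four-residual augmented-Lagrangian return, five weights are
# needed, as in main() below.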
68 | # model.compile("adam", lr=0.001, loss_weights=loss_weights) 69 | # losshistory, train_state = model.train(epochs=1000) 70 | model.compile("L-BFGS-B", loss_weights=loss_weights) 71 | losshistory, train_state = model.train(disregard_previous_best=True) 72 | 73 | save_epsilon(geom2, model, f"epsilon{i}.dat") 74 | # save_solution(geom, model, f"E{i}.dat", f"residual{i}.dat") 75 | 76 | 77 | def augmented_Lagrangian(model, geom, geom2, mu, beta): 78 | x = model.data.train_x[np.sum(model.data.num_bcs) :] 79 | lambla_Re, lambla_Im = np.zeros((len(x), 1)), np.zeros((len(x), 1)) 80 | 81 | for i in range(1, 10): 82 | # lambla_Re and lambla_Im are one half smaller than that defined in the paper. 83 | residual_Re, residual_Im, _, _ = model.predict(x, operator=pde_domain) 84 | lambla_Re += mu * residual_Re 85 | lambla_Im += mu * residual_Im 86 | 87 | mu *= beta 88 | print("-" * 80) 89 | print(f"Iteration {i}: mu = {mu}\n") 90 | 91 | def loss_Lagrangian_Re(_, y): 92 | return tf.reduce_mean(lambla_Re * y) 93 | 94 | def loss_Lagrangian_Im(_, y): 95 | return tf.reduce_mean(lambla_Im * y) 96 | 97 | loss_weights = [0.5 * mu] * 2 + [1, 1] + [1] 98 | loss = ["MSE", "MSE", loss_Lagrangian_Re, loss_Lagrangian_Im, "MSE"] 99 | model.compile("L-BFGS-B", loss=loss, loss_weights=loss_weights) 100 | losshistory, train_state = model.train(disregard_previous_best=True) 101 | 102 | save_epsilon(geom2, model, f"epsilon{i}.dat") 103 | np.savetxt(f"lambda_Re{i}.dat", lambla_Re) 104 | np.savetxt(f"lambda_Im{i}.dat", lambla_Im) 105 | np.savetxt(f"lambda{i}.dat", np.hstack((x, lambla_Re, lambla_Im))) 106 | # save_solution(geom, model, f"E{i}.dat", f"residual{i}.dat") 107 | 108 | 109 | def main(): 110 | # In some GPUs, float64 is required to make L-BFGS work for some reason... 111 | # dde.config.real.set_float64() 112 | 113 | geom = dde.geometry.Rectangle(BOX[0] - DPML, BOX[1] + DPML) 114 | geom1 = dde.geometry.Rectangle(BOX[0] - DPML, [BOX[1][0] + DPML, -1]) 115 | geom2 = dde.geometry.Rectangle([BOX[0][0] - DPML, -1], [BOX[1][0] + DPML, 0]) 116 | geom3 = dde.geometry.Rectangle([BOX[0][0] - DPML, 0], BOX[1] + DPML) 117 | geom3_small = dde.geometry.Rectangle([BOX[0][0], 0], BOX[1]) 118 | # geom3_in = dde.geometry.Rectangle([-0.5, 1], [0.5, 2]) 119 | # geom3_out = geom3 - geom3_in 120 | 121 | net = dde.maps.PFNN([2] + [[48] * 3] * 4 + [3], "tanh", "Glorot normal") 122 | net.apply_feature_transform(feature_transform) 123 | net.apply_output_transform(output_transform) 124 | 125 | # Fit to the planewave solution 126 | # E0 = np.loadtxt("E0_normal.dat") 127 | # E0 = E0[np.random.choice(len(E0), size=10000, replace=False)] 128 | # ptset = dde.bc.PointSet(E0[:, :2]) 129 | # inside = lambda x, _: ptset.inside(x) 130 | # loss0 = [ 131 | # dde.DirichletBC(geom, ptset.values_to_func(E0[:, 2:3]), inside, component=0), 132 | # dde.DirichletBC(geom, ptset.values_to_func(E0[:, 3:4]), inside, component=1), 133 | # dde.DirichletBC(geom, ptset.values_to_func(E0[:, 4:5]), inside, component=2), 134 | # ] 135 | 136 | # data = dde.data.PDE(geom, pde_domain, loss0, anchors=E0[:, :2]) 137 | # model = dde.Model(data, net) 138 | # checkpointer = dde.callbacks.ModelCheckpoint( 139 | # "model/model.ckpt", verbose=1, save_better_only=True 140 | # ) 141 | # model.compile("adam", lr=0.001, loss_weights=[0] * 2 + [10] * 2 + [0.1]) 142 | # losshistory, train_state = model.train(epochs=3000) 143 | # model.compile("adam", lr=0.001, loss_weights=[1] * 2 + [10] * 2 + [0]) 144 | # losshistory, train_state = model.train( 145 | # epochs=10000, 
callbacks=[checkpointer], disregard_previous_best=True 146 | # ) 147 | # model.compile("L-BFGS-B", loss_weights=[1] * 2 + [10] * 2 + [0]) 148 | # losshistory, train_state = model.train(callbacks=[checkpointer]) 149 | # dde.saveplot(losshistory, train_state, issave=True, isplot=False) 150 | # save_solution(geom, model) 151 | # return 152 | 153 | losses = [] 154 | # PDE (3) 155 | # losses += [ 156 | # dde.OperatorBC(geom, pde_E1_ReIm, lambda x, _: geom1.inside(x)), 157 | # dde.OperatorBC(geom, pde_E2_ReIm, lambda x, _: geom2.inside(x)), 158 | # dde.OperatorBC(geom, pde_E3_ReIm, lambda x, _: geom3.inside(x)), 159 | # ] 160 | 161 | # PML BC 162 | # Periodic BC (12) 163 | # losses += [ 164 | # dde.PeriodicBC(geom, 0, boundary1_leftright, derivative_order=0, component=0), 165 | # dde.PeriodicBC(geom, 0, boundary1_leftright, derivative_order=1, component=0), 166 | # dde.PeriodicBC(geom, 0, boundary1_leftright, derivative_order=0, component=1), 167 | # dde.PeriodicBC(geom, 0, boundary1_leftright, derivative_order=1, component=1), 168 | # dde.PeriodicBC(geom, 0, boundary2, derivative_order=0, component=2), 169 | # dde.PeriodicBC(geom, 0, boundary2, derivative_order=1, component=2), 170 | # dde.PeriodicBC(geom, 0, boundary2, derivative_order=0, component=3), 171 | # dde.PeriodicBC(geom, 0, boundary2, derivative_order=1, component=3), 172 | # dde.PeriodicBC(geom, 0, boundary3_leftright, derivative_order=0, component=4), 173 | # dde.PeriodicBC(geom, 0, boundary3_leftright, derivative_order=1, component=4), 174 | # dde.PeriodicBC(geom, 0, boundary3_leftright, derivative_order=0, component=5), 175 | # dde.PeriodicBC(geom, 0, boundary3_leftright, derivative_order=1, component=5), 176 | # ] 177 | # Dirichlet BC (4) 178 | # losses += [ 179 | # dde.DirichletBC(geom, lambda _: 0, boundary1_bottom, component=0), 180 | # dde.DirichletBC(geom, lambda _: 0, boundary1_bottom, component=1), 181 | # dde.DirichletBC(geom, lambda _: 0, boundary3_top, component=4), 182 | # dde.DirichletBC(geom, lambda _: 0, boundary3_top, component=5), 183 | # ] 184 | 185 | # Interface between Omega_1 and Omega_2 (4) 186 | # losses += [ 187 | # dde.OperatorBC(geom, interface12_ReE, interface12), 188 | # dde.OperatorBC(geom, interface12_ImE, interface12), 189 | # dde.OperatorBC(geom, interface12_dReE, interface12), 190 | # dde.OperatorBC(geom, interface12_dImE, interface12), 191 | # ] 192 | # Interface between Omega_2 and Omega_3 (4) 193 | # losses += [ 194 | # dde.OperatorBC(geom, interface23_ReE, interface23), 195 | # dde.OperatorBC(geom, interface23_ImE, interface23), 196 | # dde.OperatorBC(geom, interface23_dReE, interface23), 197 | # dde.OperatorBC(geom, interface23_dImE, interface23), 198 | # ] 199 | 200 | # Target (2) 201 | losses += [ 202 | dde.OperatorBC(geom, target_bc, lambda x, _: geom3_small.inside(x)), 203 | # dde.OperatorBC(geom, target1, lambda x, _: geom3_in.inside(x)), 204 | # dde.OperatorBC(geom, target0, lambda x, _: geom3_out.inside(x)), 205 | ] 206 | 207 | # Points 208 | dx = 0.05 209 | # Extra points 210 | # h = 0.6 211 | # g = dde.geometry.Rectangle( 212 | # [BOX[0][0] - DPML, -1.5 - h], [BOX[1][0] + DPML, -1.5 + h] 213 | # ) 214 | # anchors = g.random_points(int(g.area / (dx / 4) ** 2)) 215 | 216 | data = dde.data.PDE( 217 | geom, 218 | pde_domain, 219 | # None, 220 | losses, 221 | # [], 222 | num_domain=int(geom.area / dx ** 2), 223 | num_boundary=int(geom.perimeter / dx), 224 | # anchors=anchors, 225 | # num_test=50000, 226 | # solution=solution_forward, 227 | ) 228 | model = dde.Model(data, net) 229 | 230 | 
# loss_weights = [0.5] * 2 231 | mu = 2 232 | print("-" * 80) 233 | print(f"Iteration 0: mu = {mu}\n") 234 | # loss_weights = [0.5 * mu] * 2 + [1] # penalty 235 | loss_weights = [0.5 * mu] * 2 + [0, 0] + [1] # augmented_Lagrangian 236 | model.compile( 237 | "adam", 238 | lr=0.001, 239 | loss_weights=loss_weights, 240 | # metrics=[l2_relative_error_1, l2_relative_error_2], 241 | ) 242 | losshistory, train_state = model.train(epochs=20000) 243 | # save_epsilon(geom2, model, "epsilon_init.dat") 244 | # return 245 | model.compile( 246 | "L-BFGS-B", 247 | loss_weights=loss_weights, 248 | # metrics=[l2_relative_error_1, l2_relative_error_2], 249 | ) 250 | losshistory, train_state = model.train() 251 | save_epsilon(geom2, model, "epsilon0.dat") 252 | # save_solution(geom, model, "E0.dat", "residual0.dat") 253 | 254 | # penalty(model, geom2, mu, 2) 255 | augmented_Lagrangian(model, geom, geom2, mu, 2) 256 | 257 | dde.saveplot(losshistory, train_state, issave=True, isplot=False) 258 | 259 | 260 | if __name__ == "__main__": 261 | main() 262 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | deepxde==0.9.1 2 | numpy 3 | scipy 4 | -------------------------------------------------------------------------------- /stokes/stokes.py: -------------------------------------------------------------------------------- 1 | import deepxde as dde 2 | import numpy as np 3 | from deepxde.backend import tf 4 | 5 | # tf.random.set_random_seed(1234) 6 | 7 | GAMMA = 0.9 8 | 9 | 10 | def save_solution(geom, model, filename): 11 | x = geom.uniform_points(40000) 12 | y_pred = model.predict(x) 13 | print("Saving u and p ...\n") 14 | np.savetxt(filename + "_fine.dat", np.hstack((x, y_pred, alpha(y_pred[:, -1:])))) 15 | 16 | x = geom.uniform_points(256) 17 | y_pred = model.predict(x) 18 | print("Saving u and p ...\n") 19 | np.savetxt(filename + "_coarse.dat", np.hstack((x, y_pred, alpha(y_pred[:, -1:])))) 20 | 21 | 22 | def alpha(rho): 23 | alpha_max, alpha_min = 2.5 * 10 ** 4, 0 # 2.5 / 10 ** 4 24 | q = 0.1 25 | return alpha_max + (alpha_min - alpha_max) * rho * (1 + q) / (rho + q) 26 | 27 | 28 | def pde(inputs, outputs): 29 | du_x = dde.grad.jacobian(outputs, inputs, i=0, j=0) 30 | dv_y = dde.grad.jacobian(outputs, inputs, i=1, j=1) 31 | du_xx = dde.grad.hessian(outputs, inputs, component=0, i=0, j=0) 32 | du_yy = dde.grad.hessian(outputs, inputs, component=0, i=1, j=1) 33 | dv_xx = dde.grad.hessian(outputs, inputs, component=1, i=0, j=0) 34 | dv_yy = dde.grad.hessian(outputs, inputs, component=1, i=1, j=1) 35 | dp_x = dde.grad.jacobian(outputs, inputs, i=2, j=0) 36 | dp_y = dde.grad.jacobian(outputs, inputs, i=2, j=1) 37 | f = alpha(outputs[:, 3:]) * outputs[:, :2] 38 | fx, fy = f[:, :1], f[:, 1:] 39 | loss1 = (-(du_xx + du_yy) + dp_x - fx) * 0.01 40 | loss2 = (-(dv_xx + dv_yy) + dp_y - fy) * 0.01 41 | loss3 = (du_x + dv_y) * 1e2 42 | # return loss1, loss2, loss3 # penalty 43 | return loss1, loss2, loss3, loss1, loss2, loss3 # augmented Lagrangian 44 | 45 | 46 | def volume(inputs, outputs, X): 47 | return outputs[:, 3:4] 48 | 49 | 50 | def loss_volume(_, y): 51 | return tf.math.square(tf.math.maximum(0.0, tf.reduce_mean(y) - GAMMA)) 52 | 53 | 54 | def dissipated_power(inputs, outputs, X): 55 | du = dde.grad.jacobian(outputs, inputs, i=0) 56 | dv = dde.grad.jacobian(outputs, inputs, i=1) 57 | p1 = tf.math.reduce_sum( 58 | tf.math.square(du) + tf.math.square(dv), axis=1, keepdims=True 59 | ) 
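# p1 above is the viscous contribution |grad u|^2 + |grad v|^2; u2 and p2 below add the
# Brinkman-type friction term alpha(rho) * |u|^2, so the value returned here is the
# dissipated power 0.5 * (p1 + p2), which loss_power averages and the optimizer minimizes.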
60 | u2 = tf.math.reduce_sum(tf.math.square(outputs[:, :2]), axis=1, keepdims=True) 61 | p2 = alpha(outputs[:, 3:]) * u2 62 | return 0.5 * (p1 + p2) 63 | 64 | 65 | def loss_power(_, y): 66 | return tf.reduce_mean(y) 67 | 68 | 69 | def output_transform(inputs, outputs): 70 | x, y = inputs[:, :1], inputs[:, 1:] 71 | bc = 16 * x * (1 - x) * y * (1 - y) 72 | 73 | # u 74 | u0 = 1 75 | u = tf.math.abs(u0 + bc * outputs[:, :1]) 76 | # v 77 | v = bc * outputs[:, 1:2] 78 | # p 79 | p = (1 - x) * outputs[:, 2:3] 80 | # rho 81 | # rho = tf.math.exp(-bc * tf.math.square(outputs[:, 3:])) 82 | # rho = 1 + bc * outputs[:, 3:] 83 | center = tf.math.square(x - 0.5) + tf.math.square(y - 0.5) 84 | # rho = center * outputs[:, 3:] 85 | rho = center * ( 86 | bc * outputs[:, 3:] + (1 - bc) * (1 + 1e-6 / 0.25) / (center + 1e-6) 87 | ) 88 | rho = tf.math.maximum(0.0, tf.math.minimum(1.0, rho)) 89 | return tf.concat((u, v, p, rho), axis=1) 90 | 91 | 92 | def augmented_Lagrangian(model, geom, mu_PDE, mu_V, beta): 93 | x = model.data.train_x[np.sum(model.data.num_bcs) :] 94 | x_inside = model.data.train_x[: model.data.num_bcs[0]] 95 | lambla1 = np.zeros((len(x), 1)) 96 | lambla2 = np.zeros((len(x), 1)) 97 | lambla3 = np.zeros((len(x), 1)) 98 | lambdaV = 0 99 | mus = [[mu_PDE, mu_V, lambdaV]] 100 | 101 | for i in range(1, 10): 102 | # lambla is 1/3 of that defined in the paper. 103 | residual1, residual2, residual3, _, _, _ = model.predict(x, operator=pde) 104 | lambla1 += 2 / 3 * mu_PDE * residual1 105 | lambla2 += 2 / 3 * mu_PDE * residual2 106 | lambla3 += 2 / 3 * mu_PDE * residual3 107 | dV = np.mean(model.predict(x_inside)[:, 3:4]) - GAMMA 108 | lambdaV = max(lambdaV + 2 * mu_V * dV, 0) 109 | 110 | mu_PDE *= beta 111 | mu_V *= beta 112 | mus.append([mu_PDE, mu_V, lambdaV]) 113 | print("-" * 80) 114 | print(f"Iteration {i}: mu = {mu_PDE}, {mu_V}, lambdaV = {lambdaV}\n") 115 | 116 | def loss_PDE1(_, y): 117 | return tf.reduce_mean(lambla1 * y) 118 | 119 | def loss_PDE2(_, y): 120 | return tf.reduce_mean(lambla2 * y) 121 | 122 | def loss_PDE3(_, y): 123 | return tf.reduce_mean(lambla3 * y) 124 | 125 | def loss_V1(_, y): 126 | if lambdaV > 0: 127 | return tf.math.square(tf.reduce_mean(y) - GAMMA) 128 | return loss_volume(None, y) 129 | 130 | def loss_V2(_, y): 131 | return tf.reduce_mean(y) - GAMMA 132 | 133 | loss_weights = [mu_PDE / 3] * 3 + [1] * 3 + [mu_V, lambdaV, 1] 134 | loss = ( 135 | ["MSE"] * 3 136 | + [loss_PDE1, loss_PDE2, loss_PDE3] 137 | + [loss_V1, loss_V2, loss_power] 138 | ) 139 | model.compile("L-BFGS-B", loss=loss, loss_weights=loss_weights) 140 | losshistory, train_state = model.train(disregard_previous_best=True) 141 | 142 | np.savetxt(f"lambda1_{i}.dat", lambla1) 143 | np.savetxt(f"lambda2_{i}.dat", lambla2) 144 | np.savetxt(f"lambda3_{i}.dat", lambla3) 145 | np.savetxt(f"lambda_{i}.dat", np.hstack((x, lambla1, lambla2, lambla3))) 146 | np.savetxt("mu_lambdaV.dat", np.array(mus)) 147 | save_solution(geom, model, f"solution{i}") 148 | 149 | 150 | def main(): 151 | geom = dde.geometry.Rectangle([0, 0], [1, 1]) 152 | 153 | net = dde.maps.PFNN([2] + [[64] * 4] * 4 + [4], "tanh", "Glorot normal") # ? 
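# dde.maps.PFNN builds parallel subnetworks, one per output (u, v, p, rho); here each
# subnetwork has four hidden layers of 64 neurons. The output transform applied next
# hard-codes the velocity boundary values and the pressure gauge and clips rho to [0, 1],
# so only the PDE residuals and the volume constraint need soft enforcement in the loss.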
154 | net.apply_output_transform(output_transform) 155 | 156 | losses = [ 157 | dde.OperatorBC(geom, volume, lambda x, _: not geom.on_boundary(x)), 158 | dde.OperatorBC(geom, volume, lambda x, _: not geom.on_boundary(x)), # augmented Lagrangian 159 | dde.OperatorBC(geom, dissipated_power, lambda x, _: not geom.on_boundary(x)), 160 | ] 161 | 162 | dx = 0.01 163 | data = dde.data.PDE( 164 | geom, 165 | pde, 166 | losses, 167 | num_domain=int(geom.area / dx ** 2), 168 | num_boundary=int(geom.perimeter / dx), 169 | ) 170 | model = dde.Model(data, net) 171 | 172 | mu_PDE, mu_V = 0.1, 1e4 # ? 173 | print("-" * 80) 174 | print(f"Iteration 0: mu = {mu_PDE}, {mu_V}\n") 175 | # loss_weights = [mu_PDE / 3] * 3 + [mu_V] + [1] # penalty 176 | # loss = ["MSE", "MSE", "MSE", loss_volume, loss_power] 177 | loss_weights = [mu_PDE / 3] * 3 + [0] * 3 + [mu_V, 0] + [1] # augmented Lagrangian 178 | loss = ["MSE"] * 3 + ["zero"] * 3 + [loss_volume, "zero", loss_power] 179 | model.compile( 180 | "adam", lr=0.0001, loss=loss, loss_weights=loss_weights, 181 | ) 182 | losshistory, train_state = model.train(epochs=20000) 183 | # save_solution(geom, model, "solution") 184 | # return 185 | model.compile( 186 | "L-BFGS-B", loss=loss, loss_weights=loss_weights, 187 | ) 188 | losshistory, train_state = model.train() 189 | save_solution(geom, model, "solution0") 190 | 191 | augmented_Lagrangian(model, geom, mu_PDE, mu_V, 2) 192 | 193 | dde.saveplot(losshistory, train_state, issave=True, isplot=False) 194 | 195 | 196 | if __name__ == "__main__": 197 | main() 198 | --------------------------------------------------------------------------------
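Both holography_main.py and stokes/stokes.py enforce their constraints with the same augmented-Lagrangian outer loop: minimize the weighted loss with the multipliers held fixed, then update the multipliers from the measured residuals (lambla += 2 * mu * residual, up to the rescaling noted in the code comments) and grow mu by the factor beta. The snippet below is only a minimal illustrative sketch of that update rule on a two-variable toy problem; the names f and c and the use of scipy.optimize.minimize as the inner solver are stand-ins chosen for illustration, not code or APIs from the repository (numpy and scipy are already listed in requirements.txt).

import numpy as np
from scipy.optimize import minimize


def f(z):
    # stand-in objective (plays the role of the design objective / target mismatch)
    return float(z @ z)


def c(z):
    # stand-in equality constraint (plays the role of a PDE residual)
    return float(z.sum() - 1)


z, lam, mu, beta = np.zeros(2), 0.0, 1.0, 2.0
for k in range(8):
    # inner solve: minimize the augmented Lagrangian with (lam, mu) fixed
    z = minimize(lambda w: f(w) + lam * c(w) + mu * c(w) ** 2, z).x
    lam += 2 * mu * c(z)  # multiplier update, mirroring lambla += 2 * mu * residual
    mu *= beta            # penalty growth, mirroring mu *= beta
    print(f"iter {k}: z = {z}, c(z) = {c(z):+.2e}, lam = {lam:.4f}, mu = {mu}")
# z approaches [0.5, 0.5], the constrained minimizer, and lam approaches -1.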