├── teaser.jpg
├── shaders
├── edp.vert
└── edp.frag
└── README.md
/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cgskku/pvhv/HEAD/teaser.jpg
--------------------------------------------------------------------------------
/shaders/edp.vert:
--------------------------------------------------------------------------------
1 | #version 450
2 |
3 | // input vertex attributes
4 | layout(location=0) in vec3 position; // object-space position
5 | layout(location=1) in vec3 normal; // object-space normal
6 | layout(location=2) in vec2 texcoord; // texture coordinate
7 |
8 | // output attributes to fragment shader
9 | out VOUT
10 | {
11 | vec3 epos; // eye-space position
12 | vec3 wpos; // world-space position
13 | vec3 normal; // world-space normal (main() applies model_matrix only — original comment said eye-space, which does not match the code)
14 | vec2 tex; // texture coordinate
15 | flat uint draw_id; // object ID
16 | } vout;
17 |
18 | // uniform variables
19 | uniform uint DrawID; // multidraw can use gl_DrawID alternatively
20 | uniform mat4 model_matrix; // object transformation matrix (object-to-world)
21 | uniform struct camera_t
22 | {
23 | mat4 view_matrix; // world-to-eye transform
24 | mat4 projection_matrix; // eye-to-clip transform
25 | float fovy, dnear, dfar; // vertical field of view (radians, per tan(fovy*0.5) usage in edp.frag) and near/far clip distances
26 | float E; // lens radius (i.e., sample bound in EDP)
27 | } cam;
29 | void main()
30 | {
31 | vout.wpos = (model_matrix*vec4(position,1)).xyz; // world-space position
32 | vout.epos = (cam.view_matrix*vec4(vout.wpos,1)).xyz; // eye-space position
33 | gl_Position = cam.projection_matrix*vec4(vout.epos,1); // clip-space position
34 |
35 | vout.normal = normalize(mat3(model_matrix)*normal); // world-space normal (no view transform applied)
36 | vout.tex = texcoord; // FIX: interface-block member is 'tex' — 'vout.texcoord' did not exist and failed to compile
37 | vout.draw_id = DrawID;
38 | }
39 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Potentially Visible Hidden-Volume Rendering for Multi-View Warping (ACM SIGGRAPH 2023)
Official GLSL Implementation<br>
[Project](http://cg.skku.edu/pub/2023-kim-siggraph-pvhv) | [Paper](http://cg.skku.edu/pub/2023-kim-siggraph-pvhv)
2 |
3 | This repository contains the official shader implementation of Effective Depth Peeling used in the following paper:
4 |
5 | > [**Potentially Visible Hidden-Volume Rendering for Multi-View Warping**](http://cg.skku.edu/pub/2023-kim-siggraph-pvhv)
6 | > [Janghun Kim](http://cg.skku.edu/ppl/)<sup>1</sup>, [Sungkil Lee](http://cg.skku.edu/slee/)<sup>1</sup>
7 | > <sup>1</sup>Sungkyunkwan University
8 | > *ACM Transactions on Graphics **(Proc. SIGGRAPH)** 2023*
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 | ## Overview
17 | This paper presents the model and rendering algorithm of Potentially Visible Hidden Volumes (PVHVs) for multi-view image warping. PVHVs are 3D volumes that are occluded at a known source view, but potentially visible at novel views. Given a bound of novel views, we define PVHVs using the edges of foreground fragments from the known and the bound of novel views. PVHVs can be used to batch-test the visibilities of source fragments without iterating individual novel views in multi-fragment rendering, and thereby, cull redundant fragments prior to warping. We realize the model of PVHVs in Depth Peeling (DP). Our Effective Depth Peeling (EDP) can reduce the number of completely hidden fragments, capture important fragments early, and reduce warping cost. We demonstrate the benefit of our PVHVs and EDP in terms of memory, quality, and performance in multi-view warping.
18 |
19 | ## Citation
20 | If you find this code useful, please consider citing:
21 |
22 | ```
23 | @ARTICLE{kim23:pvhv,
24 | title={{Potentially Visible Hidden-Volume Rendering for Multi-View Warping}},
25 | author={Janghun Kim and Sungkil Lee},
26 | journal={{(Accepted to) ACM Trans. Graphics (Proc. SIGGRAPH)}},
27 | volume={42},
28 | number={4},
29 | pages={1--11},
30 | year={2023}
31 | }
32 | ```
33 |
--------------------------------------------------------------------------------
/shaders/edp.frag:
--------------------------------------------------------------------------------
1 | #version 450
2 |
3 | // DP model indexes
4 | #define MODEL_BDP 0
5 | #define MODEL_UDP 1
6 | #define MODEL_EDP 2
7 |
8 | // important constants/macros
9 | #define EDP_MAX_SAMPLES 64 // maximum of backward-search samples in EDP
10 | #define BDP_EPSILON 0.0005 // normalized-depth bias for the baseline peeling test
11 |
12 | // input attributes from vertex shader
13 | in PIN
14 | {
15 | vec3 epos; // eye-space position
16 | vec3 wpos; // world-space position
17 | vec3 normal; // world-space normal (edp.vert applies model_matrix only)
18 | vec2 tex; // texture coordinate
19 | flat uint draw_id; // object ID
20 | } pin;
21 |
22 | // output fragment color
23 | out vec4 pout; // re-encoded as RGBZI (packed color, depth, item) at the end of main()
24 |
25 | // uniform texture
26 | uniform sampler2D SRC; // previous layer texture (encoded as RGBZI format)
27 |
28 | // uniform variables
29 | uniform float height; // vertical screen resolution
30 | uniform int model; // index in { MODEL_BDP, MODEL_UDP, MODEL_EDP }
31 | uniform int layer_index; // h: index of the current layer
32 | uniform uint edp_sample_count; // number of backward-search samples in EDP (default: 14)
33 | uniform float edp_delta; // depth threshold for connectivity test (default: 0.002)
34 |
35 | // uniform buffers
36 | layout(std140, binding=10) uniform SAM // NOTE(review): 'cam' (camera_t) and phong() are referenced below but not declared in this file — presumably prepended by the host framework; confirm
37 | {
38 | // circular Poisson-Disk (or Halton) samples in [-1,1]: using .xy only
39 | vec4 PD[EDP_MAX_SAMPLES];
40 | };
41 |
42 | // BDP: Implementation of Baseline Depth Peeling [Everitt 2001]
43 | // Cass Everitt. Interactive order-independent transparency. NVIDIA 2001.
44 | // input: fragment depth, normalized blocker depth (in the previous layer)
45 | bool cull_bdp( float d, float zf )
46 | {
47 | bool bad_blocker = (zf==0) || (zf>0.999); // previous-layer blocker is invalid or empty/background
48 | bool not_peeled = d < mix( cam.dnear, cam.dfar, zf+BDP_EPSILON ); // fragment lies at or in front of the (biased) blocker depth
49 | return bad_blocker || not_peeled; // cull in either case
50 | }
51 |
52 | // UDP: Implementation of Umbra culling-based Depth Peeling [Lee et al. 2010]
53 | // Sungkil Lee, Elmar Eisemann, and Hans-Peter Seidel. Real-Time Lens Blur Effects and Focus Control, ACM SIGGRAPH 2010.
54 | // input: fragment eye-space position, normalized blocker depth (in the previous layer)
55 | bool cull_umbra( vec3 epos, float zf )
56 | {
57 | float d = -epos.z; // fragment depth
58 | float df = mix( cam.dnear, cam.dfar, zf ); // blocker depth (de-normalized)
59 | float s = tan( cam.fovy*0.5f )*2.0f*df/height; // pixel geometry size at the blocker depth
60 | if( cam.E<=s ) return true; // FIX(garbled source line): guard the division below — for a tiny lens size (E<=s) the umbra never converges (x→∞), so everything behind the blocker is hidden; TODO confirm against the original code
61 | float x = df*s/(cam.E-s); // distance from the blocker to the umbra apex
62 | return d < df+x; // cull fragments inside the umbra cone
63 | }
64 |
65 | // Algorithm 1. LCOC()
66 | float LCOC( float d, float df ) // inputs: fragment depth d, blocker depth df
67 | {
68 | float px_scale = float(height)*0.5f/df/tan(cam.fovy*0.5f); // screen-space LCOC scale at the blocker depth (pixels per unit)
69 | return px_scale*cam.E*abs(df-d)/d; // COC radius relative to df (blocker depth)
70 | }
71 |
72 | // Algorithm 1. InPVHV()
73 | // input: texture coordinate, eye-space position of input fragment (i.e., p)
74 | bool InPVHV( vec2 tc, vec3 epos )
75 | {
76 | float d = -epos.z; // fragment depth
77 | vec4 q = texelFetch( SRC, ivec2(tc), 0 ); // blocker (previous layer, RGBZI-encoded)
78 | int q_item = floatBitsToInt(q.w); if(q_item<0) return false; // FIX: was 'uint', so the <0 test was always false; bypass invalid blocker
79 |
80 | if(cull_bdp(d,q.z)) return false; // early culling with BDP
81 | if(layer_index>2) return !cull_umbra(epos,q.z); // hybrid DP: use UDP for h>2
82 |
83 | float df = mix(cam.dnear, cam.dfar, q.z); // blocker depth (de-normalized)
84 | float R = LCOC(d, df); // backward-search radius in pixels
85 | for( int k=0, n=min(int(edp_sample_count),EDP_MAX_SAMPLES); k<n; k++ ) // FIX: clamp to array bound of PD[]
86 | {
87 | vec2 offset = PD[k].xy*R; // sample offset
88 | vec4 w = texelFetch(SRC, ivec2(round(tc+offset)), 0); // fetch sample
89 | int w_item = floatBitsToInt(w.w); if(w_item<0) return false; // FIX: signed, so the invalid-sample bypass actually fires
90 | if(w.z==0) return true; // empty sample
91 | if(w.z>0.99f) continue; // background
92 |
93 | // Correspond to Algorithm 1. EdgeExists()
94 | if( q_item != w_item ) return true; // different object: edge exists
95 | else if( w.z>=q.z+edp_delta ) return true; // depth discontinuity: edge exists
96 | // conservative approximation: Line 17 in Algorithm 1
97 | else if( w.z<=q.z-edp_delta ) return true;
98 | // otherwise, the sample w is connected to blocker, requiring more tests
99 | else continue; // just for readability: this can be commented out in practice
100 | }
101 |
102 | return false;
103 | }
104 |
105 | void main()
106 | {
107 | // fragment culling against the previous layer
108 | if(model==MODEL_EDP)
109 | {
110 | if(!InPVHV( gl_FragCoord.xy, pin.epos )) discard; // FIX: 'tc' was undeclared; window-space pixel coords, matching the texelFetch in the BDP/UDP branch
111 | }
112 | else // BDP or UDP
113 | {
114 | float zf = texelFetch( SRC, ivec2(gl_FragCoord.xy), 0 ).z; // normalized blocker depth
115 | if( model==MODEL_BDP && cull_bdp( -pin.epos.z, zf )) discard;
116 | else if(model==MODEL_UDP && cull_umbra( pin.epos, zf )) discard;
117 | }
118 |
119 | // apply shading (e.g., Phong shading); phong() is not declared in this file — presumably injected by the host framework
120 | if(!phong(pout, pin.epos, pin.normal, pin.tex, pin.draw_id)) discard;
121 |
122 | // encode output in RGBZI (color, depth, item) format
123 | pout.r = uintBitsToFloat(packHalf2x16(pout.rg)); // color.rg (read before .g is overwritten below)
124 | pout.g = uintBitsToFloat(packHalf2x16(pout.ba)); // color.ba (read before .b/.a are overwritten below)
125 | pout.z = (-pin.epos.z-cam.dnear)/(cam.dfar-cam.dnear); // normalized linear depth
126 | pout.a = uintBitsToFloat(pin.draw_id); // object ID
127 | }
128 |
--------------------------------------------------------------------------------