Implementing depth testing for semi-transparent objects

Submitted on 2019-12-07 03:38:42

This seems to be what the paper linked by ripi2 is doing

// Weighted, blended order-independent transparency (WBOIT, McGuire & Bavoil
// 2013) using weight equation (10) from the paper. Draws an opaque
// checkerboard to the canvas, accumulates 6 transparent quads into two
// floating-point render targets, then composites the result over the canvas.
function main() {
  const m4 = twgl.m4;
  const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
  if (!gl) {
    alert('need WebGL2');
    return;
  }
  // Rendering to float color attachments is an optional feature even in WebGL2.
  const ext = gl.getExtension('EXT_color_buffer_float');
  if (!ext) {
    alert('EXT_color_buffer_float');
    return;
  }

  // Shared vertex shader: clip-space position = u_matrix * position.
  const vs = `
  #version 300 es
  layout(location=0) in vec4 position;
  uniform mat4 u_matrix;
  void main() {
    gl_Position = u_matrix * position;
  }
  `;

  // Opaque background: screen-space checkerboard with 32-pixel cells.
  const checkerFS = `
  #version 300 es
  precision highp float;

  uniform vec4 color1;
  uniform vec4 color2;

  out vec4 fragColor;

  void main() {
    ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
    fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
  }
  `;

  // Transparent pass. Ci is a premultiplied-alpha input color. fragData[0]
  // is the "accumulation" target, fragData[1].r the "revealage" target.
  // w() is weight equation (10) from the paper, a function of window-space
  // depth gl_FragCoord.z.
  const transparentFS = `
  #version 300 es
  precision highp float;
  uniform vec4 Ci;

  out vec4 fragData[2];

  float w(float z, float a) {
    return a * max(pow(10.0,-2.0),3.0*pow(10.0,3.0)*pow((1.0 - z), 3.));
  }

  void main() {
    float ai = Ci.a;
    float zi = gl_FragCoord.z;

    float wresult = w(zi, ai);
    fragData[0] = vec4(Ci.rgb * wresult, ai);
    fragData[1].r = ai * wresult;
  }
  `;

  // Composite pass: resolves the accumulation/revealage textures into one
  // color that is blended over the opaque canvas contents.
  const compositeFS = `
  #version 300 es
  precision highp float;
  uniform sampler2D ATexture;
  uniform sampler2D BTexture;

  out vec4 fragColor;

  void main() {
    vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
    float r = accum.a;
    accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
    fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
  }
  `;

  const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
  const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
  const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);

  // A single -1..+1 quad reused for every draw call in this example.
  const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

  // Canvas-sized framebuffer with 2 float color attachments:
  // RGBA32F accumulation + R32F revealage.
  const fbi = twgl.createFramebufferInfo(
    gl,
    [
      { internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
      { internalFormat: gl.R32F, minMag: gl.NEAREST },
    ]);

  function render(time) {
    time *= 0.001;  // ms -> seconds

    twgl.setBuffersAndAttributes(gl, transparentProgramInfo, bufferInfo);

    // drawOpaqueSurfaces();
    // Opaque pass renders straight to the canvas with blending off.
    gl.useProgram(checkerProgramInfo.program);
    gl.disable(gl.BLEND);
    twgl.setUniforms(checkerProgramInfo, {
      color1: [.5, .5, .5, 1],
      color2: [.7, .7, .7, 1],
      u_matrix: m4.identity(),
    });
    twgl.drawBufferInfo(gl, bufferInfo);

    // Switch to the float framebuffer; clear accumulation to (0,0,0,1) and
    // revealage to 1 — the values for the single-blend-function variant.
    twgl.bindFramebufferInfo(gl, fbi);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
    gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
    gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));

    // Blend setup from the paper: RGB accumulates with (ONE, ONE); alpha
    // accumulates the product of (1 - srcAlpha) terms.
    gl.depthMask(false);
    gl.enable(gl.BLEND);
    gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);

    gl.useProgram(transparentProgramInfo.program);

    // drawTransparentSurfaces();
    const quads = [
       [ .4,  0,  0, .4],
       [ .4, .4,  0, .4],
       [  0, .4,  0, .4],
       [  0, .4, .4, .4],
       [  0, .0, .4, .4],
       [ .4, .0, .4, .4],
    ];
    quads.forEach((color, ndx) => {
      const u = ndx / (quads.length - 1);
      // change the order every second
      const v = ((ndx + time | 0) % quads.length) / (quads.length - 1);
      const xy = (u * 2 - 1) * .25;
      const z = (v * 2 - 1) * .25;
      let mat = m4.identity();
      mat = m4.translate(mat, [xy, xy, z]);
      mat = m4.scale(mat, [.3, .3, 1]);
      twgl.setUniforms(transparentProgramInfo, {
        Ci: color,
        u_matrix: mat,
      });
      twgl.drawBufferInfo(gl, bufferInfo);
    });

    // Composite the transparency result back over the canvas.
    twgl.bindFramebufferInfo(gl, null);
    gl.drawBuffers([gl.BACK]);

    // Final blend from the paper: dst*srcAlpha + src*(1 - srcAlpha).
    gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);

    gl.useProgram(compositeProgramInfo.program);

    twgl.setUniforms(compositeProgramInfo, {
      ATexture: fbi.attachments[0],
      BTexture: fbi.attachments[1],
      u_matrix: m4.identity(),
    });

    twgl.drawBufferInfo(gl, bufferInfo);

    /* only needed if {alpha: false} not passed into getContext
    gl.colorMask(false, false, false, true);
    gl.clearColor(1, 1, 1, 1);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.colorMask(true, true, true, true);
    */

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

Some things to note:

  • It's using WebGL2 but it should be possible in WebGL1, you'd have to change the shaders to use GLSL ES 1.0.
  • It's using floating point textures. The paper mentions you can use half float textures as well. Note that rendering to both half and float textures is an optional feature in even WebGL2. I believe most mobile hardware can render to half but not to float.
  • It's using weight equation 10 from the paper. There are 4 weight equations in the paper. 7, 8, 9, and 10. To do 7, 8, or 9 you'd need to pass in view space z from the vertex shader to the fragment shader
  • It's switching the order of drawing every second

The code is pretty straight forward.

It creates 3 shaders. One to draw a checkerboard just so we have something that is opaque to see the transparent stuff drawn above. One is the transparent object shader. The last is the shader that composites the transparent stuff into the scene.

Next it makes 2 textures, a floating point RGBA32F texture and a floating point R32F texture (red channel only). It attaches those to a framebuffer. (That is all done in the 1 function, twgl.createFramebufferInfo. That function makes the textures the same size as the canvas by default.)

We make a single quad that goes from -1 to +1

We use that quad to draw the checkerboard into the canvas

Then we turn on blending, setup the blend equations as the paper said, switch to rendering onto our framebuffer, clear that framebuffer. note, it's cleared to 0,0,0,1 and 1 respectively. This is the version where we don't have separate blend functions per draw buffer. If you switch to the version that can use separate blending functions per draw buffer then you need to clear to different values and use a different shader (See paper)

Using our transparency shader we use that same quad to draw 6 rectangles each of a solid color. I just used a solid color to keep it simple. Each is at a different Z and the Zs change every second just to see the results of Z changing.

In the shader `Ci` is the input color. It's expected to be a premultiplied alpha color according to the paper. `fragData[0]` is the "accumulate" texture and `fragData[1]` is the "revealage" texture and is only one channel, red. The `w` function represents equation 10 from the paper.

After all 6 quads are drawn we switch back to rendering to the canvas and use the compositing shader to composite the transparency result with the non-transparent canvas contents.

Here's an example with some geometry. Differences:

  • It's using equation (7) from the paper instead of (10)
  • In order to do correct zbuffering the depth buffer needs to be shared when doing opaque and transparent rendering. So there are 2 framebuffers. One buffer has RGBA8 + depth, the other is RGBA32F + R32F + depth. The depth buffer is shared.
  • The transparent renderer computes simple lighting and then uses the result as the Ci value from the paper
  • After compositing the transparent into the opaque we still need to copy the opaque into the canvas to see the result

// Second WBOIT example: lit 3D geometry. Differences from the first example:
// uses weight equation (7), shares one depth buffer between the opaque and
// transparent framebuffers so transparent fragments are depth-tested against
// opaque geometry, and finally blits the opaque buffer to the canvas.
function main() {
  const m4 = twgl.m4;
  const v3 = twgl.v3;
  const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
  if (!gl) {
    alert('need WebGL2');
    return;
  }
  // Rendering to float color attachments is an optional feature even in WebGL2.
  const ext = gl.getExtension('EXT_color_buffer_float');
  if (!ext) {
    alert('EXT_color_buffer_float');
    return;
  }

  // Shared vertex shader: outputs clip-space position plus view-space
  // position and normal for lighting.
  const vs = `
  #version 300 es
  layout(location=0) in vec4 position;
  layout(location=1) in vec3 normal;
  uniform mat4 u_projection;
  uniform mat4 u_modelView;
  
  out vec4 v_viewPosition;
  out vec3 v_normal;

  void main() {
    gl_Position = u_projection * u_modelView * position;
    v_viewPosition = u_modelView * position;
    v_normal = (u_modelView * vec4(normal, 0)).xyz;
  }
  `;

  // Opaque background: screen-space checkerboard with 32-pixel cells.
  const checkerFS = `
  #version 300 es
  precision highp float;

  uniform vec4 color1;
  uniform vec4 color2;

  out vec4 fragColor;

  void main() {
    ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
    fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
  }
  `;
  
  // Opaque geometry: simple two-sided directional lighting.
  const opaqueFS = `
  #version 300 es
  precision highp float;
  
  in vec4 v_viewPosition;
  in vec3 v_normal;
  
  uniform vec4 u_color;
  uniform vec3 u_lightDirection;
  
  out vec4 fragColor;
  
  void main() {
    float light = abs(dot(normalize(v_normal), u_lightDirection));
    fragColor = vec4(u_color.rgb * light, u_color.a);
  }
  `;

  // Transparent pass: computes a lit color Ci, then writes the weighted
  // accumulation (fragData[0]) and revealage (fragData[1].r) outputs.
  // NOTE(review): equation (7) is defined in terms of view-space |z|, but
  // this passes gl_FragCoord.z (0..1 window depth) — see the answer text
  // about passing view-space z from the vertex shader; confirm intent.
  const transparentFS = `
  #version 300 es
  precision highp float;
  uniform vec4 u_color;
  uniform vec3 u_lightDirection;
  
  in vec4 v_viewPosition;
  in vec3 v_normal;
  
  out vec4 fragData[2];

  // eq (7)
  float w(float z, float a) {
    return a * max(
      pow(10.0, -2.0),
      min(
        3.0 * pow(10.0, 3.0),
        10.0 /
        (pow(10.0, -5.0) + 
         pow(abs(z) / 5.0, 2.0) +
         pow(abs(z) / 200.0, 6.0)
        )
      )
    );
  }

  void main() {
    float light = abs(dot(normalize(v_normal), u_lightDirection));
    vec4 Ci = vec4(u_color.rgb * light, u_color.a);
  
    float ai = Ci.a;
    float zi = gl_FragCoord.z;

    float wresult = w(zi, ai);
    fragData[0] = vec4(Ci.rgb * wresult, ai);
    fragData[1].r = ai * wresult;
  }
  `;

  // Composite pass: resolves accumulation/revealage into one color that is
  // blended over the opaque framebuffer contents.
  const compositeFS = `
  #version 300 es
  precision highp float;
  uniform sampler2D ATexture;
  uniform sampler2D BTexture;
  
  out vec4 fragColor;

  void main() {
    vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
    float r = accum.a;
    accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
    fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
  }
  `;
  
  // Copies a texture 1:1 to the current render target (used to get the
  // opaque buffer onto the canvas at the end of the frame).
  const blitFS = `
  #version 300 es
  precision highp float;
  uniform sampler2D u_texture;
  
  out vec4 fragColor;

  void main() {
    fragColor = texelFetch(u_texture, ivec2(gl_FragCoord.xy), 0);
  }
  `;

  const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
  const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
  const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
  const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
  const blitProgramInfo = twgl.createProgramInfo(gl, [vs, blitFS]);

  // makeVAO is hoisted (function declaration), so it can be used here.
  const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
  const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
  const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
  
  // Wraps a bufferInfo in a vertex array object for the given program.
  function makeVAO(programInfo, bufferInfo) {
    return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
  }
  
  // In order to do proper zbuffering we need to share
  // the depth buffer 
  
  const opaqueAttachments = [
    { internalFormat: gl.RGBA8, minMag: gl.NEAREST },
    { format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
  ];
  const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
  
  // The transparent framebuffer reuses the opaque framebuffer's depth
  // attachment so transparent draws are depth-tested against opaque ones.
  const transparentAttachments = [
    { internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
    { internalFormat: gl.R32F, minMag: gl.NEAREST },
    { format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
  ];
  const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);

  function render(time) {
    time *= 0.001;  // ms -> seconds

    if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
      // if the canvas is resized also resize the framebuffer
      // attachments (the depth buffer will be resized twice 
      // but I'm too lazy to fix it)
      twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
      twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
    }
    
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const fov = 45 * Math.PI / 180;
    const zNear = 0.1;
    const zFar = 500;
    
    const projection = m4.perspective(fov, aspect, zNear, zFar);
    const eye = [0, 0, -5];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    const camera = m4.lookAt(eye, target, up);
    const view = m4.inverse(camera);

    const lightDirection = v3.normalize([1, 3, 5]);

    // Opaque pass into the opaque framebuffer (color + shared depth).
    twgl.bindFramebufferInfo(gl, opaqueFBI);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0]);    
    gl.depthMask(true);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);

    // drawOpaqueSurfaces();
    // draw checkerboard
    gl.useProgram(checkerProgramInfo.program);
    gl.disable(gl.DEPTH_TEST);
    gl.disable(gl.BLEND);
    twgl.setUniforms(checkerProgramInfo, {
      color1: [.5, .5, .5, 1],
      color2: [.7, .7, .7, 1],
      u_projection: m4.identity(),
      u_modelView: m4.identity(),
    });
    twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);

    // draw a cube with depth buffer
    gl.enable(gl.DEPTH_TEST);
    
    {
      gl.useProgram(opaqueProgramInfo.program);
      gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
      let mat = view;
      mat = m4.rotateX(mat, time * .1);
      mat = m4.rotateY(mat, time * .2);
      mat = m4.scale(mat, [1.5, 1.5, 1.5]);
      twgl.setUniforms(opaqueProgramInfo, {
        u_color: [1, .5, .2, 1],
        u_lightDirection: lightDirection,
        u_projection: projection,
        u_modelView: mat,
      });    
      twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
    }
    
    // Transparent pass into the float framebuffer (depth test still active
    // against the shared depth buffer; depth writes disabled below).
    twgl.bindFramebufferInfo(gl, transparentFBI);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
    // these values change if using separate blend functions
    // per attachment (something WebGL2 does not support)
    gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
    gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));

    gl.depthMask(false);  // don't write to depth buffer (but still testing)
    gl.enable(gl.BLEND);
    // this changes if using separate blend functions per attachment
    gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);

    gl.useProgram(transparentProgramInfo.program);
    gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);

    // drawTransparentSurfaces();
    const spheres = [
       [ .4,  0,  0, .4],
       [ .4, .4,  0, .4],
       [  0, .4,  0, .4],
       [  0, .4, .4, .4],
       [  0, .0, .4, .4],
       [ .4, .0, .4, .4],
    ];
    spheres.forEach((color, ndx) => {
      const u = ndx + 2;
      let mat = view;
      mat = m4.rotateX(mat, time * u * .1);
      mat = m4.rotateY(mat, time * u * .2);
      mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
      twgl.setUniforms(transparentProgramInfo, {
        u_color: color,
        u_lightDirection: lightDirection,
        u_projection: projection,
        u_modelView: mat,
      });
      twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
    });

    // composite transparent results with opaque
    twgl.bindFramebufferInfo(gl, opaqueFBI);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0]);

    gl.disable(gl.DEPTH_TEST);
    // Final blend from the paper: dst*srcAlpha + src*(1 - srcAlpha).
    gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);

    gl.useProgram(compositeProgramInfo.program);
    gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);

    twgl.setUniforms(compositeProgramInfo, {
      ATexture: transparentFBI.attachments[0],
      BTexture: transparentFBI.attachments[1],
      u_projection: m4.identity(),
      u_modelView: m4.identity(),
    });

    twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);

    /* only needed if {alpha: false} not passed into getContext
    gl.colorMask(false, false, false, true);
    gl.clearColor(1, 1, 1, 1);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.colorMask(true, true, true, true);
    */
    
    // draw opaque color buffer into canvas
    // could probably use gl.blitFramebuffer
    gl.disable(gl.BLEND);
    twgl.bindFramebufferInfo(gl, null);
    gl.useProgram(blitProgramInfo.program);
    gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);

    twgl.setUniforms(blitProgramInfo, {
      u_texture: opaqueFBI.attachments[0],
      u_projection: m4.identity(),
      u_modelView: m4.identity(),
    });
    twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

It occurs to me rather than use standard OpenGL blending for the last 2 steps (composite followed by blit) we could change the composite shader so it takes 3 textures (ATexture, BTexture, opaqueTexture) and blends in the shader outputting directly to the canvas. That would be faster.

// Third WBOIT example: like the second, but the composite shader reads the
// opaque color texture directly and does the final blend itself, writing
// straight to the canvas — which removes the separate blit pass.
function main() {
  const m4 = twgl.m4;
  const v3 = twgl.v3;
  const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
  if (!gl) {
    alert('need WebGL2');
    return;
  }
  // Rendering to float color attachments is an optional feature even in WebGL2.
  const ext = gl.getExtension('EXT_color_buffer_float');
  if (!ext) {
    alert('EXT_color_buffer_float');
    return;
  }

  // Shared vertex shader: outputs clip-space position plus view-space
  // position and normal for lighting.
  const vs = `
  #version 300 es
  layout(location=0) in vec4 position;
  layout(location=1) in vec3 normal;
  uniform mat4 u_projection;
  uniform mat4 u_modelView;
  
  out vec4 v_viewPosition;
  out vec3 v_normal;

  void main() {
    gl_Position = u_projection * u_modelView * position;
    v_viewPosition = u_modelView * position;
    v_normal = (u_modelView * vec4(normal, 0)).xyz;
  }
  `;

  // Opaque background: screen-space checkerboard with 32-pixel cells.
  const checkerFS = `
  #version 300 es
  precision highp float;

  uniform vec4 color1;
  uniform vec4 color2;

  out vec4 fragColor;

  void main() {
    ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
    fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
  }
  `;
  
  // Opaque geometry: simple two-sided directional lighting.
  const opaqueFS = `
  #version 300 es
  precision highp float;
  
  in vec4 v_viewPosition;
  in vec3 v_normal;
  
  uniform vec4 u_color;
  uniform vec3 u_lightDirection;
  
  out vec4 fragColor;
  
  void main() {
    float light = abs(dot(normalize(v_normal), u_lightDirection));
    fragColor = vec4(u_color.rgb * light, u_color.a);
  }
  `;

  // Transparent pass: computes a lit color Ci, then writes the weighted
  // accumulation (fragData[0]) and revealage (fragData[1].r) outputs.
  // NOTE(review): equation (7) is defined in terms of view-space |z|, but
  // this passes gl_FragCoord.z (0..1 window depth) — see the answer text
  // about passing view-space z from the vertex shader; confirm intent.
  const transparentFS = `
  #version 300 es
  precision highp float;
  uniform vec4 u_color;
  uniform vec3 u_lightDirection;
  
  in vec4 v_viewPosition;
  in vec3 v_normal;
  
  out vec4 fragData[2];

  // eq (7)
  float w(float z, float a) {
    return a * max(
      pow(10.0, -2.0),
      min(
        3.0 * pow(10.0, 3.0),
        10.0 /
        (pow(10.0, -5.0) + 
         pow(abs(z) / 5.0, 2.0) +
         pow(abs(z) / 200.0, 6.0)
        )
      )
    );
  }

  void main() {
    float light = abs(dot(normalize(v_normal), u_lightDirection));
    vec4 Ci = vec4(u_color.rgb * light, u_color.a);
  
    float ai = Ci.a;
    float zi = gl_FragCoord.z;

    float wresult = w(zi, ai);
    fragData[0] = vec4(Ci.rgb * wresult, ai);
    fragData[1].r = ai * wresult;
  }
  `;

  // Composite pass: resolves accumulation/revealage AND blends with the
  // opaque texture in one shader (replicating the fixed-function blend
  // gl.blendFunc(ONE_MINUS_SRC_ALPHA, SRC_ALPHA) from the previous example),
  // so no separate blit to the canvas is needed.
  const compositeFS = `
  #version 300 es
  precision highp float;
  uniform sampler2D ATexture;
  uniform sampler2D BTexture;
  uniform sampler2D opaqueTexture;
  
  out vec4 fragColor;

  void main() {
    vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
    float r = accum.a;
    accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
    vec4 transparentColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
    vec4 opaqueColor = texelFetch(opaqueTexture, ivec2(gl_FragCoord.xy), 0);
    //  gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
    fragColor = transparentColor * (1. - r) + opaqueColor * r;
  }
  `;
  
  const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
  const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
  const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
  const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);

  // makeVAO is hoisted (function declaration), so it can be used here.
  const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
  const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
  const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
  
  // Wraps a bufferInfo in a vertex array object for the given program.
  function makeVAO(programInfo, bufferInfo) {
    return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
  }
  
  // In order to do proper zbuffering we need to share
  // the depth buffer 
  
  const opaqueAttachments = [
    { internalFormat: gl.RGBA8, minMag: gl.NEAREST },
    { format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
  ];
  const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
  
  // The transparent framebuffer reuses the opaque framebuffer's depth
  // attachment so transparent draws are depth-tested against opaque ones.
  const transparentAttachments = [
    { internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
    { internalFormat: gl.R32F, minMag: gl.NEAREST },
    { format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
  ];
  const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);

  function render(time) {
    time *= 0.001;  // ms -> seconds

    if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
      // if the canvas is resized also resize the framebuffer
      // attachments (the depth buffer will be resized twice 
      // but I'm too lazy to fix it)
      twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
      twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
    }
    
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const fov = 45 * Math.PI / 180;
    const zNear = 0.1;
    const zFar = 500;
    
    const projection = m4.perspective(fov, aspect, zNear, zFar);
    const eye = [0, 0, -5];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    const camera = m4.lookAt(eye, target, up);
    const view = m4.inverse(camera);

    const lightDirection = v3.normalize([1, 3, 5]);

    // Opaque pass into the opaque framebuffer (color + shared depth).
    twgl.bindFramebufferInfo(gl, opaqueFBI);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0]);    
    gl.depthMask(true);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);

    // drawOpaqueSurfaces();
    // draw checkerboard
    gl.useProgram(checkerProgramInfo.program);
    gl.disable(gl.DEPTH_TEST);
    gl.disable(gl.BLEND);
    twgl.setUniforms(checkerProgramInfo, {
      color1: [.5, .5, .5, 1],
      color2: [.7, .7, .7, 1],
      u_projection: m4.identity(),
      u_modelView: m4.identity(),
    });
    twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);

    // draw a cube with depth buffer
    gl.enable(gl.DEPTH_TEST);
    
    {
      gl.useProgram(opaqueProgramInfo.program);
      gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
      let mat = view;
      mat = m4.rotateX(mat, time * .1);
      mat = m4.rotateY(mat, time * .2);
      mat = m4.scale(mat, [1.5, 1.5, 1.5]);
      twgl.setUniforms(opaqueProgramInfo, {
        u_color: [1, .5, .2, 1],
        u_lightDirection: lightDirection,
        u_projection: projection,
        u_modelView: mat,
      });    
      twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
    }
    
    // Transparent pass into the float framebuffer (depth test still active
    // against the shared depth buffer; depth writes disabled below).
    twgl.bindFramebufferInfo(gl, transparentFBI);
    gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
    // these values change if using separate blend functions
    // per attachment (something WebGL2 does not support)
    gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
    gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));

    gl.depthMask(false);  // don't write to depth buffer (but still testing)
    gl.enable(gl.BLEND);
    // this changes if using separate blend functions per attachment
    gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);

    gl.useProgram(transparentProgramInfo.program);
    gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);

    // drawTransparentSurfaces();
    const spheres = [
       [ .4,  0,  0, .4],
       [ .4, .4,  0, .4],
       [  0, .4,  0, .4],
       [  0, .4, .4, .4],
       [  0, .0, .4, .4],
       [ .4, .0, .4, .4],
    ];
    spheres.forEach((color, ndx) => {
      const u = ndx + 2;
      let mat = view;
      mat = m4.rotateX(mat, time * u * .1);
      mat = m4.rotateY(mat, time * u * .2);
      mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
      twgl.setUniforms(transparentProgramInfo, {
        u_color: color,
        u_lightDirection: lightDirection,
        u_projection: projection,
        u_modelView: mat,
      });
      twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
    });

    // composite transparent results with opaque
    // (blending done in the shader, so fixed-function blend is off)
    twgl.bindFramebufferInfo(gl, null);

    gl.disable(gl.DEPTH_TEST);
    gl.disable(gl.BLEND);
    
    gl.useProgram(compositeProgramInfo.program);
    gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);

    twgl.setUniforms(compositeProgramInfo, {
      ATexture: transparentFBI.attachments[0],
      BTexture: transparentFBI.attachments[1],
      opaqueTexture: opaqueFBI.attachments[0],
      u_projection: m4.identity(),
      u_modelView: m4.identity(),
    });

    twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);

    /* only needed if {alpha: false} not passed into getContext
    gl.colorMask(false, false, false, true);
    gl.clearColor(1, 1, 1, 1);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.colorMask(true, true, true, true);
    */
    
    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

I have three requirements for my depth testing of semi-transparent objects

It's actually quite rare to have self-intersecting objects with partially transparent (actually blended) samples. The common cases for self-intersecting geometry is grass and leaves. However, in these cases the actual areas covered by grass and leaves are not transparent - they are opaque.

The common solution here is alpha testing. Render the leaves as an opaque (not blended) quad (with a normal depth test and write), and discard fragments which have insufficient alpha (e.g. because they are outside of the leaf). Because individual samples here are opaque, then you get order independence for free because the depth test works as you would expect for an opaque object.

If you want blended edges, then enable alpha-to-coverage and let the multi-sample resolve clean up the edges a little.

For the small amount of actually transparent stuff you have left, then normally you need to do a back-to-front sort on the CPU, and render it after the opaque pass.

Proper OIT is possible, but it is generally quite an expensive technique, so I've yet to see anyone actually use it outside of an academic environment (at least on mobile OpenGL ES implementations).

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!