Image Processing #

Problem statement #

Exercise

Implement an image / video processing app supporting different masks, including kernel sizes other than 3x3, and providing:

  • A region-of-interest-based tool to selectively apply a given mask.
  • A magnifier tool.
  • Luma and other coloring brightness tools.

Background #

In image processing, a kernel, convolution matrix, or mask is a small matrix used for blurring, sharpening, enhancement, edge detection, and more. The effect is obtained by computing a convolution between the kernel and the image.
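
As a minimal illustration of the idea (not the GPU shader used below), the following plain JavaScript sketch convolves a single grayscale pixel with a 3x3 kernel; the clamp-to-edge handling and the sample data are illustrative assumptions:

// Convolve one pixel (x, y) of a grayscale image with a 3x3 kernel given in row-major order.
// `image` is a 2D array of brightness values; out-of-range neighbors are clamped to the edge.
function convolvePixel(image, x, y, kernel) {
  const h = image.length, w = image[0].length;
  let sum = 0;
  for (let ky = -1; ky <= 1; ky++) {
    for (let kx = -1; kx <= 1; kx++) {
      const px = Math.min(w - 1, Math.max(0, x + kx));
      const py = Math.min(h - 1, Math.max(0, y + ky));
      sum += image[py][px] * kernel[(ky + 1) * 3 + (kx + 1)];
    }
  }
  return sum;
}

// Example: a box blur (all weights 1/9) applied at the center of a 3x3 patch.
const boxBlur = new Array(9).fill(1 / 9);
const patch = [[10, 20, 30], [40, 50, 60], [70, 80, 90]];
console.log(convolvePixel(patch, 1, 1, boxBlur)); // ≈ 50, the neighborhood average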

1. Identity: This mask leaves the image unchanged (the kernel values for all six masks are written out after this list).

2. Sharpen: The sharpen kernel emphasizes differences between adjacent pixel values, which makes the image appear more vivid.

3. Edge Detection: A mask that highlights points of abrupt brightness change in an image.

4. Box blur: A box blur is a linear filter in which each output pixel is the average of its neighboring pixels.

5. Gaussian: The visual effect of this blurring technique is a smooth blur resembling that of viewing the image through a translucent screen.

6. Emboss: Image embossing is a computer graphics technique in which each pixel of an image is replaced either by a highlight or a shadow, depending on light/dark boundaries on the original image.
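
For reference, these are the 3x3 kernels that the sketch in the Code section below sends to the mask uniform, written out as plain JavaScript arrays in row-major order (the names are ours; the box blur weights appear in the sketch as the rounded value 0.1111 ≈ 1/9, and the Gaussian weights 0.0625, 0.125, and 0.25 correspond to 1/16, 2/16, and 4/16):

const kernels = {
  identity:      [ 0,  0,  0,    0, 1,  0,    0,  0,  0],
  sharpen:       [-1,  0, -1,    0, 5,  0,   -1,  0, -1],
  boxBlur:       [1/9, 1/9, 1/9,   1/9, 1/9, 1/9,   1/9, 1/9, 1/9],
  edgeDetection: [-1, -1, -1,   -1, 8, -1,   -1, -1, -1],
  gaussian:      [1/16, 2/16, 1/16,   2/16, 4/16, 2/16,   1/16, 2/16, 1/16],
  emboss:        [-2, -1,  0,   -1, 1,  0,    0,  1,  0],
};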

Coloring brightness #

1. Original: No change is made to the image.

2. Component average: The brightness of each pixel is taken as the arithmetic mean of its R, G, and B components.

3. Luma: Luma is the weighted average of gamma-corrected R, G, and B, weighted according to their contribution to perceived lightness (the shader below uses the Rec. 601 weights 0.299, 0.587, and 0.114).

4. HSV: (for hue, saturation, value) defines colors based on three factors:

  • Hue: What is commonly called the color itself.
  • Saturation: Measures the purity of a color, that is, the absence of other colors.
  • Value or Brightness: Refers to the intensity of light, that is, the amount of light in that color.

5. HSL: (for hue, saturation, lightness) describes colors with hue, saturation, and lightness parameters, the latter defined as the average of the minimum and maximum of the RGB components.

The main difference from the HSV space is that, starting from a pure hue and lowering the saturation to its minimum, the HSV model yields white while the HSL model yields gray.
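
As a CPU-side counterpart of the brightness functions defined in the fragment shader below, the following plain JavaScript sketch computes the four measures for a normalized RGB triple; the sample color is an arbitrary illustration:

// Brightness measures for a normalized RGB triple (components in [0, 1]).
function componentAverage([r, g, b]) { return (r + g + b) / 3; }
function luma([r, g, b]) { return 0.299 * r + 0.587 * g + 0.114 * b; } // Rec. 601 weights
function hsvValue([r, g, b]) { return Math.max(r, g, b); }
function hslLightness([r, g, b]) { return (Math.max(r, g, b) + Math.min(r, g, b)) / 2; }

const color = [0.2, 0.6, 0.4]; // arbitrary sample color
console.log(componentAverage(color)); // ≈ 0.4
console.log(luma(color));             // ≈ 0.4576
console.log(hsvValue(color));         // 0.6
console.log(hslLightness(color));     // ≈ 0.4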

Code #

Code Image Processing .js
let theShader;
let img;
let input;
let select_mask, select_brightness, select_type;
let mask_value = 0;
let region, magnifier;
let coloring_tool = 'None';
let coloring_tool_value = 0;
let vid;
let cam;

function preload() {
  theShader = readShader('/vc_page/sketches/shaders/image/shader.frag',
                        { varyings: Tree.texcoords2 });
  img = loadImage('/vc_page/sketches/shaders/image/hojas.jpg');
  
  vid = createVideo(['/vc_page/sketches/shaders/image/dog.mp4']);
  vid.hide();
}

function setup() {
  createCanvas(700, 500, WEBGL);
  noStroke();
  textureMode(NORMAL);
  shader(theShader);
  
  select_mask = createSelect();
  select_mask.position(20, 15);
  select_mask.style('color', 'black');
  select_mask.option('Identity', 0);
  select_mask.option('Sharpen', 1);
  select_mask.option('Box Blur', 2);
  select_mask.option('Edge Detection', 3);
  select_mask.option('Gaussian', 4);
  select_mask.option('Emboss', 5);  
  select_mask.changed(selectEventMask);
  
  // default mask: identity
  theShader.setUniform('mask', [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]);
  
  region = createCheckbox('Region', false);
  region.style('color', 'white');
  region.position(150, 15);
  
  magnifier = createCheckbox('Magnifier', false);
  magnifier.style('color', 'white');
  magnifier.position(230, 15);

  select_brightness = createSelect();
  select_brightness.position(330, 15);
  select_brightness.style('color', 'black');
  select_brightness.option('Original', 0);
  select_brightness.option('Component Average', 1);
  select_brightness.option('Luma', 2);
  select_brightness.option('HSV', 3);
  select_brightness.option('HSL', 4);
  select_brightness.changed(selectEventBrightness);
  
  cam = createCapture(VIDEO);
  cam.hide();
  
  select_type = createSelect();
  select_type.position(500, 15);
  select_type.style('color', 'black');
  select_type.option('Image', 0);
  select_type.option('Video', 1);
  select_type.option('Camera', 2);
  select_type.changed(selectEventType);
  
  theShader.setUniform('texture', img);
  emitResolution(theShader, 'u_resolution');    
}

function selectEventMask(){
  mask_value = select_mask.value();
  
  switch (mask_value){
      case '0':
        theShader.setUniform('mask', [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]); 
        break;
      case '1':
        theShader.setUniform('mask', [-1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -1.0, 0.0, -1.0]);
        break;
      case '2':
        theShader.setUniform('mask', [0.1111, 0.1111, 0.1111, 0.1111, 0.1111, 0.1111, 0.1111, 0.1111, 0.1111]);
        break;
      case '3':
        theShader.setUniform('mask', [-1.0, -1.0, -1.0, -1.0, 8.0, -1.0, -1.0, -1.0, -1.0]);
        break;
      case '4':
        theShader.setUniform('mask', [0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625, 0.125, 0.0625]); 
        break;
      case '5':
        theShader.setUniform('mask', [-2.0, -1.0, 0.0, -1.0, 1.0, 0.0, 0.0, 1.0, 0.0]); 
        break;
      default:
        console.log(mask_value);
        break;
  }
}

function selectEventBrightness(){
  // convert the select value (a string) to an int before sending it to the int uniform
  coloring_tool_value = int(select_brightness.value());
}

function selectEventType(){
  const type_value = select_type.value();
  
  switch (type_value){
    case '0':
      theShader.setUniform('texture', img);
      vid.pause();
      break;
    case '1':
      theShader.setUniform('texture', vid);
      vid.loop();
      break;
    case '2':
      theShader.setUniform('texture', cam);
      vid.pause();
      break; 
    default:
      console.log(type_value);
      break;
  }
}

function draw() {
  background(0);
  
  if (region.checked()) {
    emitMousePosition(theShader, 'u_mouse');
    theShader.setUniform('region', true);
    theShader.setUniform('magnifier', false);
  } else if (magnifier.checked()) {
    emitMousePosition(theShader, 'u_mouse');
    theShader.setUniform('magnifier', true);
    theShader.setUniform('region', false);
  } else {
    theShader.setUniform('region', false);
    theShader.setUniform('magnifier', false);
  }
  
  theShader.setUniform('coloring_tool', coloring_tool_value);
  
  // texel offsets are derived from the image dimensions, even when video or camera is displayed
  emitTexOffset(theShader, img, 'texOffset');
  
  quad(-width / 2 + 20, -height / 2 + 35, width / 2 - 20, -height / 2 + 35,
        width / 2 - 20, height / 2 - 30, -width / 2 + 20, height / 2 - 30);
}

// Ref: https://p5js.org/es/examples/3d-shader-using-webcam.html

Code Image Processing .frag
precision mediump float;

// uniforms are defined and sent by the sketch
uniform int coloring_tool;
uniform sampler2D texture;
varying vec2 texcoords2;
uniform vec2 texOffset;
// holds the 3x3 kernel
uniform float mask[9];

uniform bool region;
uniform bool magnifier;
uniform vec2 u_mouse;
uniform vec2 u_resolution;

// returns component average of given texel
float avg(vec3 texel) {
  return (texel.r + texel.g + texel.b)/3.0;
}

// returns luma of given texel
float luma(vec3 texel) {
  return 0.299 * texel.r + 0.587 * texel.g + 0.114 * texel.b;
}

// returns the value (V) component of HSV for the given texel
float hsv(vec3 texel) {
  return max(max(texel.r, texel.g), texel.b);
}

// returns the lightness (L) component of HSL for the given texel
float hsl(vec3 texel){
  float maxColor = max(max(texel.r, texel.g), texel.b);
  float minColor = min(min(texel.r, texel.g), texel.b);
  return (maxColor + minColor)/2.0;
}

// applies the selected coloring brightness tool to the given texel
vec4 coloringBrightness(vec4 texel) {
  // coloring_tool == 0 (Original) leaves the texel unchanged
  if (coloring_tool == 1) {
    texel = vec4(vec3(avg(texel.rgb)), 1.0);
  } else if (coloring_tool == 2) {
    texel = vec4(vec3(luma(texel.rgb)), 1.0);
  } else if (coloring_tool == 3) {
    texel = vec4(vec3(hsv(texel.rgb)), 1.0);
  } else if (coloring_tool == 4) {
    texel = vec4(vec3(hsl(texel.rgb)), 1.0);
  }
  return texel;
}

vec4 convolutionMask(){
  // 1. Use offset to move along texture space.
  // In this case to find the texcoords of the texel neighbours.
  vec2 tc0 = texcoords2 + vec2(-texOffset.s, -texOffset.t);
  vec2 tc1 = texcoords2 + vec2(         0.0, -texOffset.t);
  vec2 tc2 = texcoords2 + vec2(+texOffset.s, -texOffset.t);
  vec2 tc3 = texcoords2 + vec2(-texOffset.s,          0.0);
  // origin (current fragment texcoords)
  vec2 tc4 = texcoords2 + vec2(         0.0,          0.0);
  vec2 tc5 = texcoords2 + vec2(+texOffset.s,          0.0);
  vec2 tc6 = texcoords2 + vec2(-texOffset.s, +texOffset.t);
  vec2 tc7 = texcoords2 + vec2(         0.0, +texOffset.t);
  vec2 tc8 = texcoords2 + vec2(+texOffset.s, +texOffset.t);
  
  
  // 2. Sample texel neighbours within the rgba array
  vec4 rgba[9];
  rgba[0] = texture2D(texture, tc0);
  rgba[1] = texture2D(texture, tc1);
  rgba[2] = texture2D(texture, tc2);
  rgba[3] = texture2D(texture, tc3);
  rgba[4] = texture2D(texture, tc4);
  rgba[5] = texture2D(texture, tc5);
  rgba[6] = texture2D(texture, tc6);
  rgba[7] = texture2D(texture, tc7);
  rgba[8] = texture2D(texture, tc8);

  // 3. Apply convolution kernel
  vec4 convolution = vec4(0.0); // the accumulator must be explicitly initialized
  for (int i = 0; i < 9; i++) {
    convolution += rgba[i]*mask[i];
  }
  
  return vec4(convolution.rgb, 1.0); 
}

void main() {
  // texture2D(texture, texcoords2) samples texture at texcoords2 
  // and returns the normalized texel color
  vec4 texel = texture2D(texture, texcoords2);
  
  // Magnifier (adapted from the Shadertoy reference below):
  // R and h model a spherical lens (sphere radius and cap height);
  // hr is the on-screen radius of the magnified circle.
  float R = 100.0;
  float h = 40.0;
  float hr = R * sqrt(1.0 - ((R - h) / R) * ((R - h) / R));

  // offset of the current fragment from the mouse position
  vec2 xy = gl_FragCoord.xy - u_mouse.xy;
  float r = sqrt(xy.x * xy.x + xy.y * xy.y);
  // inside the lens, shrink the sampling offset to produce the zoom effect
  vec2 new_xy = r < hr ? xy * (R - h) / sqrt(R * R - r * r) : xy;
  
  // apply the selected kernel, then the selected coloring brightness tool
  vec4 processed = coloringBrightness(convolutionMask());

  if (region) {
    // apply the effect only inside the circular region around the mouse
    gl_FragColor = r < hr ? processed : texel;
  } else if (magnifier) {
    // resample the texture with the lens-distorted coordinates
    gl_FragColor = texture2D(texture, (new_xy + u_mouse.xy) / u_resolution.xy);
  } else {
    gl_FragColor = processed;
  }
}

// Ref: https://www.shadertoy.com/view/Ms33WX
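
The shader above hard-codes a 3x3 mask (uniform float mask[9]). One way to address the problem statement's requirement for other kernel sizes would be to declare a larger uniform, e.g. uniform float mask[25], and sample a 5x5 neighborhood in the fragment shader; the sketch side would then only need to build and upload the larger kernel. A minimal sketch of that CPU side, assuming the shader change has been made, could look like this:

// Build a normalized n x n box-blur kernel (n odd); n = 5 yields 25 weights of 1/25.
function boxKernel(n) {
  return new Array(n * n).fill(1 / (n * n));
}

// Upload it exactly like the 3x3 masks above; this assumes the fragment shader
// now declares `uniform float mask[25]` and loops over a 5x5 neighborhood.
theShader.setUniform('mask', boxKernel(5));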

Conclusions #

  • In image processing, an image is taken as input and a transformed image (or information derived from it) is produced as output. Common operations include image enhancement, restoration, encoding, and compression.
  • As general-purpose computers have become faster, digital image processing has become the most common form of image processing and is widely used because it is not only the most versatile method, but also the cheapest.

References #