""" | |
The OpenGL specification doesn't allow you to create a context without a window, | |
since it needs the pixel format that you set into the device context. | |
Actually, it is necessary to have a window handler to create a "traditional" rendering context. | |
It is used to fetch OpenGL information and extensions availability. | |
Once you got that information, you can destroy the render context and release the "dummy" window. | |
So, in this code, the window is created, the context is set to this window, | |
the image result is saved to an output image file and, then, this window is released. | |
""" | |
# import glfw
import OpenGL
import ctypes
import ctypes.util
# from lucid.misc.gl.glcontext import create_opengl_context
# from lib.glcontext import create_opengl_context
import glfw
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy
from PIL import Image
import base64
from io import BytesIO
from utils.settings import set_options
from pyvirtualdisplay import Display
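

# Minimal illustration of the hidden "dummy" window pattern described in the module
# docstring. This is a hypothetical helper for reference only; image_enhance() below
# performs the same steps inline (plus a virtual display for headless servers).
def _create_hidden_context_sketch(width=1, height=1):
    if not glfw.init():
        raise RuntimeError("glfw.init() failed")
    # An invisible 1x1 window is enough to obtain a current OpenGL context.
    glfw.window_hint(glfw.VISIBLE, False)
    window = glfw.create_window(width, height, "dummy", None, None)
    if not window:
        glfw.terminate()
        raise RuntimeError("could not create hidden window")
    glfw.make_context_current(window)
    return window
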
def image_enhance(image, exposure, saturation, contrast, brightness, gamma, shadows, highlights, whites, blacks,
                  clarity, temperature, sharpness):
    # create_opengl_context((image.width,image.height))
    # Initialize glfw (inside a virtual display, so this also works on headless servers)
    disp = Display()
    disp.start()
    if not glfw.init():
        print('error in init')
        return
    # Create window
    # Size (1, 1) so that nothing is shown in the window
    glfw.window_hint(glfw.VISIBLE, False)
    window = glfw.create_window(1, 1, "My OpenGL window", None, None)
    # window = glfw.create_window(800, 600, "My OpenGL window", None, None)
    # Terminate if the window could not be created
    if not window:
        print('error in window')
        glfw.terminate()
        return
    # Set context to window
    glfw.make_context_current(window)
    disp.stop()
    #
    # Initial data
    # Positions and texture coordinates (the commented-out variant also carries per-vertex colors)
    '''
    #         positions       colors           texture coords
    quad = [ -0.5, -0.5, 0.0,  1.0, 0.0, 0.0,  0.0, 0.0,
              0.5, -0.5, 0.0,  0.0, 1.0, 0.0,  1.0, 0.0,
              0.5,  0.5, 0.0,  0.0, 0.0, 1.0,  1.0, 1.0,
             -0.5,  0.5, 0.0,  1.0, 1.0, 1.0,  0.0, 1.0]
    '''
    #       positions  texture coords
    quad = [-1., -1.,   0., 0.,
             1., -1.,   1., 0.,
             1.,  1.,   1., 1.,
            -1.,  1.,   0., 1.]
    quad = numpy.array(quad, dtype=numpy.float32)
    # Vertex indices order
    indices = [0, 1, 2,
               2, 3, 0]
    indices = numpy.array(indices, dtype=numpy.uint32)
    # print(quad.itemsize * len(quad))
    # print(indices.itemsize * len(indices))
    # print(quad.itemsize * 8)
    # Vertex shader
    vertex_shader = """
    attribute vec4 a_position;
    attribute vec4 a_color;
    attribute vec2 a_texCoord;
    varying vec2 v_texCoord;
    varying vec4 v_color;
    void main() {
        gl_Position = a_position;
        v_texCoord = a_texCoord;
        v_color = vec4(a_color.rgb * a_color.a, a_color.a);
    }
    """
    # Fragment shader
    fragment_shader = """
    varying vec2 v_texCoord;
    uniform sampler2D u_image;
    uniform float u_gamma;
    uniform float u_shadows;
    uniform float u_highlights;
    uniform float u_whites;
    uniform float u_blacks;
    uniform float u_clarity;
    uniform mat4 u_colorMatrix;
    uniform vec4 u_colorOffset;
    uniform vec2 u_pixelDimension;
    uniform mat4 u_clarityMatrix;
    uniform vec4 u_clarityOffset;
    uniform float u_temperature;
    uniform float u_sharpness;
    const vec3 warmFilter = vec3(0.93, 0.54, 0.0);
    const mat3 RGBtoYIQ = mat3(0.299, 0.587, 0.114, 0.596, -0.274, -0.322, 0.212, -0.523, 0.311);
    const mat3 YIQtoRGB = mat3(1.0, 0.956, 0.621, 1.0, -0.272, -0.647, 1.0, -1.105, 1.702);
    const float EPSILON = 0.0000001;
    vec4 unpremultiply(vec4 col) {
        col.rgb /= max(col.a, EPSILON);
        return col;
    }
    float calculateLuminance(vec3 rgb) {
        // This is the luminance calculation part of the RGB to HSL formula.
        vec4 p = mix(
            vec4(rgb.gb, 0.0, -1.0 / 3.0),
            vec4(rgb.bg, -1.0, 2.0 / 3.0),
            vec4(rgb.g < rgb.b)
        );
        vec4 q = mix(
            vec4(rgb.r, p.yzx),
            vec4(p.xyw, rgb.r),
            vec4(rgb.r < p.x)
        );
        float chroma = q.x - min(q.w, q.y);
        float luminance = q.x - chroma * 0.5;
        return luminance;
    }
    vec3 map(vec3 x, float in_min, float in_max, float out_min, float out_max) {
        return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
    }
    void main() {
        vec4 color = clamp(texture2D(u_image, v_texCoord), 0.0, 1.0);
        color.rgb /= max(color.a, EPSILON); // Revert premultiplied alpha
        // Apply gamma
        if (u_gamma != 1.0) {
            color.rgb = pow(color.rgb, vec3(1.0 / max(u_gamma, EPSILON)));
        }
        // Apply shadows and highlights
        float luminance = calculateLuminance(color.rgb);
        float shadow = u_shadows >= 0.0
            ? clamp(
                pow(luminance, 1.0 / (u_shadows + 1.0))
                + pow(luminance, 2.0 / (u_shadows + 1.0)) * -0.76
                - luminance
                , 0.0, max(u_shadows, 1.0))
            : -clamp(
                pow(luminance, 1.0 / (-u_shadows + 1.0))
                + pow(luminance, 2.0 / (-u_shadows + 1.0)) * -0.76
                - luminance
                , 0.0, max(-u_shadows, 1.0));
        float highlight = u_highlights < 0.0
            ? clamp(
                1.0
                - pow(1.0 - luminance, 1.0 / (1.0 - u_highlights))
                - pow(1.0 - luminance, 2.0 / (1.0 - u_highlights)) * -0.8
                - luminance
                , -1.0, 0.0)
            : -clamp(
                1.0
                - pow(1.0 - luminance, 1.0 / (1.0 + u_highlights))
                - pow(1.0 - luminance, 2.0 / (1.0 + u_highlights)) * -0.8
                - luminance
                , -1.0, 0.0);
        // Bright colors need more contrast and dark colors need more brightness.
        // This preserves saturation, because the color information of dark colors is lost.
        float shadowContrast = shadow * luminance * luminance;
        float shadowBrightness = shadow - shadowContrast;
        float offset = luminance + shadowContrast + highlight;
        color.rgb = clamp(offset * ((color.rgb + shadowBrightness) / max(luminance, EPSILON)), 0.0, 1.0);
        // Apply color matrix
        color.rgb = clamp(color * u_colorMatrix + u_colorOffset, 0.0, 1.0).rgb;
        color.rgb = map(color.rgb, 0.0, 1.0, u_blacks / 2.0, 1.0 + u_whites / 2.0);
        color = clamp(color, 0.0, 1.0);
        color.rgb *= color.a; // Reset premultiplied alpha
        if (u_clarity != 0.0) {
            color = unpremultiply(color);
            // L = Left, R = Right, C = Center, T = Top, B = Bottom
            vec4 colLB = texture2D(u_image, v_texCoord + vec2(-u_pixelDimension.x, -u_pixelDimension.y));
            vec4 colLC = texture2D(u_image, v_texCoord + vec2(-u_pixelDimension.x, 0.0));
            vec4 colLT = texture2D(u_image, v_texCoord + vec2(-u_pixelDimension.x, u_pixelDimension.y));
            vec4 colCL = texture2D(u_image, v_texCoord + vec2( 0.0, -u_pixelDimension.y));
            vec4 colCR = texture2D(u_image, v_texCoord + vec2( 0.0, u_pixelDimension.y));
            vec4 colRB = texture2D(u_image, v_texCoord + vec2( u_pixelDimension.x, -u_pixelDimension.y));
            vec4 colRC = texture2D(u_image, v_texCoord + vec2( u_pixelDimension.x, 0.0));
            vec4 colRT = texture2D(u_image, v_texCoord + vec2( u_pixelDimension.x, u_pixelDimension.y));
            vec4 mergedColor = color;
            mergedColor.rgb += unpremultiply(colLB).rgb + unpremultiply(colLC).rgb + unpremultiply(colLT).rgb;
            mergedColor.rgb += unpremultiply(colCL).rgb + unpremultiply(colCR).rgb;
            mergedColor.rgb += unpremultiply(colRB).rgb + unpremultiply(colRC).rgb + unpremultiply(colRT).rgb;
            mergedColor /= 9.0;
            float grayValue = clamp(color.r * 0.3 + color.g * 0.59 + color.b * 0.1, 0.111111, 0.999999);
            // 1.0 and 0.0 result in white, not black, therefore we clamp.
            // This mapping sends values below 0.1 to 0, values above 0.2 to 1,
            // and produces a gradient for values between 0.1 and 0.2.
            // The function is mirrored at 0.5, so values between 0.8 and 0.9 produce a descending gradient,
            // and values above 0.9 are mapped to 0.
            float frequenceFactor = min(smoothstep(1.0 - grayValue, 0.0, 0.11), smoothstep(grayValue, 0.0, 0.11));
            // Here we apply the high-pass filter. Its strength is determined by the u_clarity uniform
            // and the frequency factor, so only the mid tones are affected by this filter.
            // The clarity input ranges from -1 to 1, but we want to strengthen the effect,
            // hence the little magic number 3.7.
            color.rgb = clamp(color + clamp((color - mergedColor) * u_clarity * 3.7 * frequenceFactor, 0.0, 10.0), 0.0, 1.0).rgb;
            // Apply exposure, but only to the mid tones.
            color.rgb = color.rgb * pow(2.0, u_clarity * 0.27 * frequenceFactor);
            // Apply contrast and desaturation matrix
            color.rgb = clamp(color * u_colorMatrix + u_colorOffset, 0.0, 1.0).rgb;
            color.rgb *= color.a; // Premultiply alpha
            color = clamp(color, 0.0, 1.0);
        }
        if (u_temperature != 0.0) {
            float temperature = u_temperature;
            const float tint = 0.0;
            vec4 source = color;
            source.rgb /= max(source.a, EPSILON); // Revert premultiplied alpha
            vec3 yiq = RGBtoYIQ * source.rgb;
            yiq.b = clamp(yiq.b + tint * 0.5226 * 0.1, -0.5226, 0.5226);
            vec3 rgb = YIQtoRGB * yiq;
            vec3 processed = mix(
                (1.0 - 2.0 * (1.0 - rgb) * (1.0 - warmFilter)),
                (2.0 * rgb * warmFilter),
                vec3(rgb.r < 0.5, rgb.g < 0.5, rgb.b < 0.5)
            );
            color = vec4(mix(rgb, processed, temperature), source.a);
            color.rgb *= color.a; // Premultiply alpha again
        }
        if (u_sharpness != 0.0) {
            float factor = mix(0.2, -1.0, float(u_sharpness > 0.0));
            vec4 sharpenedColor = mix(0.2, 5.0, float(u_sharpness > 0.0)) * color;
            sharpenedColor += factor * clamp(texture2D(u_image, v_texCoord + u_pixelDimension * vec2(-1.0, 0.0)), 0.0, 1.0);
            sharpenedColor += factor * clamp(texture2D(u_image, v_texCoord + u_pixelDimension * vec2( 0.0, -1.0)), 0.0, 1.0);
            sharpenedColor += factor * clamp(texture2D(u_image, v_texCoord + u_pixelDimension * vec2( 0.0, 1.0)), 0.0, 1.0);
            sharpenedColor += factor * clamp(texture2D(u_image, v_texCoord + u_pixelDimension * vec2( 1.0, 0.0)), 0.0, 1.0);
            color.rgb /= max(color.a, EPSILON); // Unpremultiply
            sharpenedColor.rgb /= max(sharpenedColor.a, EPSILON); // Unpremultiply
            sharpenedColor = clamp(sharpenedColor, 0.0, 1.0);
            color = clamp(mix(color, sharpenedColor, abs(u_sharpness)), 0.0, 1.0);
            color = vec4(color.rgb * color.a, color.a); // Premultiply
        }
        gl_FragColor = color;
    }
    """
    #
    # Compile shaders
    shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),
                                              OpenGL.GL.shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER))
    # VBO
    v_b_o = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, v_b_o)
    glBufferData(GL_ARRAY_BUFFER, quad.itemsize *
                 len(quad), quad, GL_STATIC_DRAW)
    # EBO
    e_b_o = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, e_b_o)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.itemsize *
                 len(indices), indices, GL_STATIC_DRAW)
    # Configure positions of initial data
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 *
                          sizeof(GLfloat), ctypes.c_void_p(0))
    glEnableVertexAttribArray(0)
    # Configure texture coordinates of initial data
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 *
                          sizeof(GLfloat), ctypes.c_void_p(8))
    glEnableVertexAttribArray(1)
    # Texture
    texture = glGenTextures(1)
    # Bind texture
    glBindTexture(GL_TEXTURE_2D, texture)
    # Texture wrapping params
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    # Texture filtering params
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    #
    # Open image
    #
    # img_data = numpy.array(list(image.getdata()), numpy.uint8)
    #
    # flipped_image = image.transpose(Image.FLIP_TOP_BOTTOM)
    # img_data = flipped_image.convert("RGBA").tobytes()
    #
    img_data = image.convert("RGBA").tobytes()
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.width,
                 image.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, img_data)
    # print(image.width, image.height)
    #
    # Create render buffer with size (image.width x image.height)
    rb_obj = glGenRenderbuffers(1)
    glBindRenderbuffer(GL_RENDERBUFFER, rb_obj)
    glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, image.width, image.height)
    # Create frame buffer
    fb_obj = glGenFramebuffers(1)
    glBindFramebuffer(GL_FRAMEBUFFER, fb_obj)
    glFramebufferRenderbuffer(
        GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rb_obj)
    # Check framebuffer completeness (a simple setup like this should not fail)
    status = glCheckFramebufferStatus(GL_FRAMEBUFFER)
    if status != GL_FRAMEBUFFER_COMPLETE:
        print("incomplete framebuffer object")
    #
    # Install program
    glUseProgram(shader)
    set_options(exposure, saturation, contrast, brightness, gamma, shadows, highlights, whites, blacks,
                clarity, temperature, sharpness, shader, image.width, image.height)
    # Bind framebuffer and set viewport size
    glBindFramebuffer(GL_FRAMEBUFFER, fb_obj)
    glViewport(0, 0, image.width, image.height)
    # Draw the quad which covers the entire viewport
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)
    #
    # PNG
    # Read back the pixels and create the output image
    image_buffer = glReadPixels(
        0, 0, image.width, image.height, GL_RGBA, GL_UNSIGNED_BYTE)
    image_out = numpy.frombuffer(image_buffer, dtype=numpy.uint8)
    image_out = image_out.reshape(image.height, image.width, 4)
    # glfw.terminate()
    img = Image.fromarray(image_out, 'RGBA')
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue())
    return img_str


if __name__ == "__main__":
    image = Image.open("/Users/planningo/Downloads/download.jpeg")
    image_enhance(image, exposure=0, saturation=0, contrast=0, brightness=0, gamma=1, shadows=0,
                  highlights=0, blacks=0, whites=0, clarity=0, temperature=-1, sharpness=1)
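    # Example (hypothetical usage): the return value is a base64-encoded PNG, so it can be
    # decoded back into a PIL image, e.g.:
    #   result = image_enhance(image, exposure=0, saturation=0, contrast=0, brightness=0,
    #                          gamma=1, shadows=0, highlights=0, whites=0, blacks=0,
    #                          clarity=0, temperature=-1, sharpness=1)
    #   enhanced = Image.open(BytesIO(base64.b64decode(result)))
    #   enhanced.save("enhanced.png")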