path: root/config/picom/shaders/cube.glsl
#version 430
#define PI 3.14159265
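// Intended for picom's custom window shader support
// (e.g. the window-shader-fg option, on picom builds that provide it)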

// These shaders work by using a pinhole camera and raycasting
// The window's 3D object will always be (somewhat) centered at (0, 0, 0)
struct pinhole_camera
{
    float focal_offset; // Distance along the Z axis between the camera 
                        // center and the focal point. Use negative values
                        // so the image doesn't flip
                        // This kinda works like FOV in games

    // Transformations 
    // Use these to modify the coordinate system of the camera plane
    vec3 rotations; // Rotations in radians around each axis 
                    // The camera plane rotates around 
                    // its center point, not the origin

    vec3 translations; // Translations in pixels along each axis

    vec3 deformations; // Deforms the camera. A higher value on an axis
                       // means the window will be squashed along that axis

    // ---------------------------------------------------------------// 
    
    // "Aftervalues" 
    // These will be set later with setup_camera(), leave them as 0
    vec3 base_x;
    vec3 base_y;
    vec3 base_z;
    vec3 center_point;
    vec3 focal_point;
};

in vec2 texcoord;             // texture coordinate of the fragment

uniform sampler2D tex;        // texture of the window


uniform float time; // Time in milliseconds.
      
float time_cyclic = mod(time/10000,2); // Scaled-down time: ramps from 0 to 2
                                       // over 20 seconds, then wraps back to 0.
                                       // Useful in periodic functions like cos and sin

// Time variables can be used to change transformations over time
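// For example, sin(time_cyclic*PI) sweeps smoothly from 0 up to 1, down to -1
// and back once per cycle, which is handy for the rotation and translation
// fields of the presets further down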


ivec2 window_size = textureSize(tex, 0); // Size of the window

float window_diagonal = length(window_size); // Diagonal of the window
                                             
int wss = min(window_size.x, window_size.y); // Window's smallest side, used as the cube's side length
// Try to keep focal offset and translations proportional to window_size components 
// or window_diagonal as you see fit
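// For reference, the default focal_offset of -window_size.y/2 used below gives
// roughly a 90 degree vertical field of view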

pinhole_camera camera = 
pinhole_camera(-window_size.y/2,   // Focal offset
               vec3(0,0,0), // Rotations
               vec3(0,0,0), // Translations
               vec3(1,1,1), // Deformations
               // Leave the rest as 0
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0));

// Here are some presets you can use

// Moves the camera up and down
pinhole_camera bobbing = 
pinhole_camera(-window_size.y/2,
               vec3(0,0,0),
               vec3(0,cos(time_cyclic*PI)*window_size.y/16,-window_size.y/4),
               vec3(1,1,1),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0));

// Rotates camera around the origin
// Makes the window rotate around the Y axis from the camera's POV
// (if the window is centered)
pinhole_camera rotate_around_origin = 
pinhole_camera(-wss,
               vec3(PI/6*sin(2*time_cyclic*PI),-time_cyclic*PI-PI/2,0),
               vec3(cos(time_cyclic*PI)*wss,
                    wss/2*sin(2*time_cyclic*PI),
                    sin(time_cyclic*PI)*wss),
               vec3(1,1,1),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0));

// Rotate camera around its center
pinhole_camera rotate_around_itself = 
pinhole_camera(-wss,
               vec3(0,-time_cyclic*PI-PI/2,0),
               vec3(0,0,-wss),
               vec3(1,1,1),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0));
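
// Example of building your own preset (only used if you assign it to
// window_cam below): tilts the camera slightly around the X axis and
// steps it back along Z so the cube stays in front of the camera
pinhole_camera tilt_back = 
pinhole_camera(-wss,            // Focal offset
               vec3(-PI/8,0,0), // Rotations
               vec3(0,0,-wss),  // Translations
               vec3(1,1,1),     // Deformations
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0),
               vec3(0));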

// Here you can select the preset to use
pinhole_camera window_cam = rotate_around_origin;



ivec2 window_center = ivec2(window_size.x/2, window_size.y/2); 

// Default window post-processing (provided by picom):
// 1) invert color
// 2) opacity / transparency
// 3) max-brightness clamping
// 4) rounded corners
// Declared here but not called by window_shader() below
vec4 default_post_processing(vec4 c);

// Sets up a camera by applying its transformations and
// calculating its xyz vector basis
pinhole_camera setup_camera(pinhole_camera camera)
{
    // Apply translations
    camera.center_point += camera.translations;

    // Apply rotations 
    // We initialize our vector basis as the unit vector along each axis
    // scaled by the corresponding component of our deformations vector
    camera.base_x = vec3(camera.deformations.x, 0, 0);
    camera.base_y = vec3(0, camera.deformations.y, 0);
    camera.base_z = vec3(0, 0, camera.deformations.z);


    // Then we rotate them according to our rotations vector,
    // applying the X, Y and Z axis rotations in that order:
    // First save these values to avoid redundancy
    float cosx = cos(camera.rotations.x);
    float cosy = cos(camera.rotations.y);
    float cosz = cos(camera.rotations.z);
    float sinx = sin(camera.rotations.x);
    float siny = sin(camera.rotations.y);
    float sinz = sin(camera.rotations.z);
    
    // Declare a buffer vector we will use to apply multiple changes at once
    vec3 tmp = vec3(0);

    // Rotations for base_x:
    tmp = camera.base_x;
    // X axis:
    tmp.y =  camera.base_x.y * cosx - camera.base_x.z * sinx;
    tmp.z =  camera.base_x.y * sinx + camera.base_x.z * cosx;
    camera.base_x = tmp;
    // Y axis:
    tmp.x =  camera.base_x.x * cosy + camera.base_x.z * siny;
    tmp.z = -camera.base_x.x * siny + camera.base_x.z * cosy;
    camera.base_x = tmp;
    // Z axis:
    tmp.x =  camera.base_x.x * cosz - camera.base_x.y * sinz;
    tmp.y =  camera.base_x.x * sinz + camera.base_x.y * cosz;
    camera.base_x = tmp;

    // Rotations for base_y:
    tmp = camera.base_y;
    // X axis:
    tmp.y =  camera.base_y.y * cosx - camera.base_y.z * sinx;
    tmp.z =  camera.base_y.y * sinx + camera.base_y.z * cosx;
    camera.base_y = tmp;
    // Y axis:
    tmp.x =  camera.base_y.x * cosy + camera.base_y.z * siny;
    tmp.z = -camera.base_y.x * siny + camera.base_y.z * cosy;
    camera.base_y = tmp;
    // Z axis:
    tmp.x =  camera.base_y.x * cosz - camera.base_y.y * sinz;
    tmp.y =  camera.base_y.x * sinz + camera.base_y.y * cosz;
    camera.base_y = tmp;

    // Rotations for base_z: 
    tmp = camera.base_z;
    // X axis:
    tmp.y =  camera.base_z.y * cosx - camera.base_z.z * sinx;
    tmp.z =  camera.base_z.y * sinx + camera.base_z.z * cosx;
    camera.base_z = tmp;
    // Y axis:
    tmp.x =  camera.base_z.x * cosy + camera.base_z.z * siny;
    tmp.z = -camera.base_z.x * siny + camera.base_z.z * cosy;
    camera.base_z = tmp;
    // Z axis:
    tmp.x =  camera.base_z.x * cosz - camera.base_z.y * sinz;
    tmp.y =  camera.base_z.x * sinz + camera.base_z.y * cosz;
    camera.base_z = tmp;

    // Now that we have our transformed 3D basis
    // we can calculate our focal point
    camera.focal_point = camera.center_point + camera.base_z * camera.focal_offset;

    // Return our set up camera
    return camera;
}
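// For reference: with rotations = (0,0,0) and deformations = (1,1,1) the basis
// stays axis-aligned (base_x = (1,0,0), base_y = (0,1,0), base_z = (0,0,1))
// and the focal point ends up focal_offset pixels from the camera center
// along base_z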

// Gets the window pixel where a ray hits the given cube face plane,
// with t parameterizing the intersection along the ray
vec4 get_pixel_from_projection(float t, int face, pinhole_camera camera, vec3 focal_vector)
{
    // If the point we end up in is behind our camera, don't "render" it
    if (t < 1)
    {
        return vec4(0);
    }

    // Then we multiply our focal vector by t and add our focal point to it
    // to end up at a point on the face's plane
    vec3 intersection = focal_vector * t + camera.focal_point;
    

    // Save necessary coordinates
    // (different cube faces need different coords)
    vec2 cam_coords;
    switch (face) 
    {
        case 0:
        case 1:
            cam_coords = intersection.xy;
            break;
        case 2:
        case 3:
            cam_coords = intersection.zy;
            break;
        case 4:
        case 5:
            cam_coords = intersection.zx;
            break;
    }
    
    // Correct for the window's aspect ratio, then shift from cube-centered
    // coordinates to texel coordinates
    if (window_size.x > window_size.y)
    {
        cam_coords.x /= window_size.y/float(window_size.x);
    }
    else if (window_size.x < window_size.y)
    {
        cam_coords.y /= window_size.x/float(window_size.y);
    }
    cam_coords += window_center;
    // If pixel is outside of our window region
    // return a completely transparent color
    if (cam_coords.x >=window_size.x-1 || 
        cam_coords.y >=window_size.y-1 ||
        cam_coords.x <=0 || cam_coords.y <=0)
    {
        return vec4(0);
    }

    // Fetch the pixel
    vec4 pixel = texelFetch(tex, ivec2(cam_coords), 0);

    return pixel;
}

// Combines two colors using their alpha channels
// This is the standard "over" operator: color2 is composited on top of color1
// Adapted from https://stackoverflow.com/questions/64701745/how-to-blend-colours-with-transparency
vec4 alpha_composite(vec4 color1, vec4 color2)
{
    float ar = color1.w + color2.w - (color1.w * color2.w);
    float asr = color2.w / ar;
    float a1 = 1 - asr;
    float a2 = asr * (1 - color1.w);
    float ab = asr * color1.w;
    vec4 outcolor;
    outcolor.xyz = color1.xyz * a1 + color2.xyz * a2 + color2.xyz * ab;
    outcolor.w = ar;
    return outcolor;
}

// Gets a pixel through the camera using coords as coordinates in
// the camera plane
vec4 get_pixel_through_camera(vec2 coords, pinhole_camera camera)
{
    // Offset coords
    coords -= window_center;

    // Find the pixel 3d position using the camera vector basis
    vec3 pixel_3dposition =   camera.center_point 
                            + coords.x * camera.base_x 
                            + coords.y * camera.base_y;

    // Get the vector going from the focal point to the pixel in 3D space
    vec3 focal_vector = pixel_3dposition - camera.focal_point;

    // We need 6 planes, one for each face of the cube; they all follow the plane equation
    // ax + by + cz = d
    float a[] = {0,0,
                 1,1,
                 0,0};
    float b[] = {0,0,
                 0,0,
                 1,1};
    float c[] = {1,1,
                 0,0,
                 0,0};
    float d[] = {-wss/2.0,wss/2.0,
                 -wss/2.0,wss/2.0,
                 -wss/2.0,wss/2.0};

    // Then there's a line going from our focal point to each of the planes 
    // which we can describe as:
    // x(t) = focal_point.x + focal_vector.x * t
    // y(t) = focal_point.y + focal_vector.y * t
    // z(t) = focal_point.z + focal_vector.z * t
    // We substitute x, y and z with x(t), y(t) and z(t) in the plane EQ
    // Solving for t we get:
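    //   t = (d - a*focal_point.x - b*focal_point.y - c*focal_point.z)
    //     / (a*focal_vector.x + b*focal_vector.y + c*focal_vector.z)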
    vec2 t[6]; // we use a vec2 to also store the plane that was hit
    for (int i = 0; i < t.length(); i++)
    {
        t[i].x = (d[i] 
                - a[i]*camera.focal_point.x 
                - b[i]*camera.focal_point.y 
                - c[i]*camera.focal_point.z)
            / (a[i]*focal_vector.x 
                    + b[i]*focal_vector.y 
                    + c[i]*focal_vector.z);
        t[i].y = i;
    }
    
    // Bubble sort the intersections by t so we know which ones happen first
    for (int i = 0; i < t.length(); i++)
    {
        for (int j = 0; j < t.length() - 1; j++)
        {
            if (t[j].x > t[j+1].x)
            {
                vec2 tmp = t[j];
                t[j] = t[j+1];
                t[j+1] = tmp;
            }
        }
    }

    // Then we go through the intersections from nearest to farthest
    // and blend their pixels together using alpha
    vec4 blended_pixels = vec4(0);
    for (int i = 0; i < t.length(); i++)
    {
        // We get the pixel through projection
        vec4 projection_pixel = get_pixel_from_projection(t[i].x, 
                                                          int(t[i].y),
                                                          camera,
                                                          focal_vector);
        // Only blend non fully transparent pixels
        if (projection_pixel.w > 0.0)
        {
            // Blend the pixel using alpha
            blended_pixels = alpha_composite(projection_pixel, blended_pixels);
        }
    }
    return blended_pixels;
}

// Main function
vec4 window_shader() {
    pinhole_camera transformed_cam = setup_camera(window_cam);
    return get_pixel_through_camera(texcoord, transformed_cam);
}