This post records how to use the Sobel operator for edge detection to implement a screen-outline post-processing effect in Unity (following the book "Introduction to Unity Shader").
Unity implements a screen post-processing effect as follows:
1. First, attach a screen post-processing script to the camera. The script needs to check whether a series of conditions are met, such as whether the current platform supports render textures and screen effects, and whether it supports the current Unity shader. To improve code reusability, we create a base class that performs these checks, and each effect script inherits from it.
Base class PostEffectsBase.cs
using UnityEngine;
using System.Collections;

[ExecuteInEditMode]                // Allow the script to run in edit mode
[RequireComponent(typeof(Camera))]
public class PostEffectsBase : MonoBehaviour {

    //protected void Start()
    //{
    //    CheckResources();
    //}

    // Called on start
    //protected void CheckResources()
    //{
    //    bool isSupported = CheckSupport();
    //    if (isSupported == false)
    //    {
    //        NotSupported();
    //    }
    //}

    // Checking platform support is no longer needed (it always returns true),
    // so the checks below are commented out
    //protected bool CheckSupport() {
    //    if (SystemInfo.supportsImageEffects == false) {
    //        Debug.LogWarning("This platform does not support image effects.");
    //        return false;
    //    }
    //    return true;
    //}

    // Called when the platform doesn't support this effect
    //protected void NotSupported() {
    //    enabled = false;
    //}

    // Called when we need to create the material used by this effect
    protected Material CheckShaderAndCreateMaterial(Shader shader, Material material) {
        if (shader == null) {
            return null;
        }

        //if (shader.isSupported && material && material.shader == shader)
        //    return material;
        if (material && material.shader == shader)
            return material;

        //if (!shader.isSupported) {
        //    return null;
        //}
        material = new Material(shader);
        material.hideFlags = HideFlags.DontSave;
        if (material)
            return material;
        else
            return null;
    }
}
Edge detection script EdgeDetection.cs
using UnityEngine;
using System.Collections;

public class EdgeDetection : PostEffectsBase {

    public Shader edgeDetectShader;
    private Material edgeDetectMaterial = null;
    public Material material {
        get {
            edgeDetectMaterial = CheckShaderAndCreateMaterial(edgeDetectShader, edgeDetectMaterial);
            return edgeDetectMaterial;
        }
    }

    [Range(0.0f, 1.0f)]
    public float edgesOnly = 0.0f;                // 0 - 1: original image - edges only
    public Color edgeColor = Color.black;         // Edge color
    public Color backgroundColor = Color.white;   // Background color

    // src -- source texture, dest -- destination texture
    void OnRenderImage(RenderTexture src, RenderTexture dest) {
        if (material != null) {
            material.SetFloat("_EdgeOnly", edgesOnly);
            material.SetColor("_EdgeColor", edgeColor);
            material.SetColor("_BackgroundColor", backgroundColor);

            // The currently rendered image arrives in the first parameter; Blit
            // processes it with the material and writes the result into the second
            Graphics.Blit(src, dest, material);
        } else {
            Graphics.Blit(src, dest);
        }
    }
}
edgesOnly: adjusts the blending weight between the detected edges and the source image. When edgesOnly is 1, only the edges are displayed; when edgesOnly is 0, the edges are superimposed on the source rendered image.
OnRenderImage: a hook provided by Unity that lets us directly grab the rendered screen image. In this function we usually call Graphics.Blit to process the render texture.
Blit: its declaration is as follows: public static void Blit(Texture src, RenderTexture dest, Material mat, int pass=-1)
src corresponds to the source texture,
dest is the target texture,
mat is the material we use; Blit passes the src texture to the shader's texture property named _MainTex,
pass defaults to -1, which means every pass in the shader is called in turn (the sketch below shows how a specific pass can be selected).
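As an aside, here is a minimal sketch (not from the book; the class name TwoPassBlitExample and the pass indices are hypothetical) of how the pass parameter can select individual passes and chain them through a temporary render texture:

using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class TwoPassBlitExample : MonoBehaviour {
    public Material effectMaterial; // assumed to contain at least two passes

    void OnRenderImage(RenderTexture src, RenderTexture dest) {
        if (effectMaterial == null) {
            Graphics.Blit(src, dest); // no material assigned: just copy
            return;
        }
        // Temporary buffer to hold the intermediate result
        RenderTexture tmp = RenderTexture.GetTemporary(src.width, src.height);
        Graphics.Blit(src, tmp, effectMaterial, 0);  // run only pass 0
        Graphics.Blit(tmp, dest, effectMaterial, 1); // run only pass 1
        RenderTexture.ReleaseTemporary(tmp);
    }
}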
2. Create a shader to process the render texture. The CheckShaderAndCreateMaterial method provided by the base class returns a material that uses this shader.
Edge Detection.shader
Shader "MyShader/Edge Detection" { Properties { _MainTex ("Base (RGB)", 2D) = "white" {} _EdgeOnly ("Edge Only", Float) = 1.0 _EdgeColor ("Edge Color", Color) = (0, 0, 0, 1) _BackgroundColor ("Background Color", Color) = (1, 1, 1, 1) } SubShader { Pass { //Post rendering settings for standard screens //Turn off depth writing to prevent blocking objects being rendered behind it ZTest Always Cull Off ZWrite Off CGPROGRAM #include "UnityCG.cginc" #pragma vertex vert #pragma fragment fragSobel sampler2D _MainTex; uniform half4 _MainTex_TexelSize;//Access the size of each texture element corresponding to a texture. It is used to calculate the texture coordinates of each adjacent area fixed _EdgeOnly; fixed4 _EdgeColor; fixed4 _BackgroundColor; struct v2f { float4 pos : SV_POSITION; half2 uv[9] : TEXCOORD0; }; //appdata_img is a built-in structure for unity, which contains a vertex and a texture information v2f vert(appdata_img v) { v2f o; o.pos = UnityObjectToClipPos(v.vertex); half2 uv = v.texcoord; //Define a texture array with dimension of 9, corresponding to 9 texture coordinates required when sampling with Sobel operator o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1); o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1); o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1); o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0); o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0); o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0); o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1); o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1); o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1); return o; } //Brightness information fixed luminance(fixed4 color) { return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b; } half Sobel(v2f i) { const half Gx[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; const half Gy[9] = {-1, -2, -1, 0, 0, 0, 1, 2, 1}; half texColor; half edgeX = 0; half edgeY = 0; //In the convolution operation, 9 pixels are sampled in turn, their brightness values are calculated, multiplied by the corresponding weight in the convolution kernel Gx Gy, and then superimposed on their respective gradients for (int it = 0; it < 9; it++) { texColor = luminance(tex2D(_MainTex, i.uv[it])); edgeX += texColor * Gx[it]; edgeY += texColor * Gy[it]; } //1 subtract the absolute values of the gradient values in the horizontal and vertical directions to get the edge. The smaller the edge, the more likely it is to be an edge half edge = 1 - abs(edgeX) - abs(edgeY); return edge; } fixed4 fragSobel(v2f i) : SV_Target { half edge = Sobel(i); //lerp(from,to,t) = from + (to - from) *t fixed4 withEdgeColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[4]), edge); fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackgroundColor, edge); return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly); } ENDCG } } FallBack Off }
Finally, the three lerp operations in fragSobel are a little difficult to understand.
First, be clear about the mathematical meaning of lerp:
lerp(from, to, t) = from + (to - from) * t; that is, as t goes from 0 to 1, the output moves linearly from from to to. For example, lerp(0, 4, 0.5) = 2.
Recall that for the edge value produced by the convolution above, the smaller the value, the more likely the pixel is to be on an edge.
The first lerp, withEdgeColor, interpolates between _EdgeColor and the sampled screen image: as edge goes from 0 to 1, the result goes from the pure edge color to the unmodified source pixel, yielding the source image with edges drawn on top.
The second lerp, onlyEdgeColor, interpolates between _EdgeColor and the background color (white by default): as edge goes from 0 to 1, the result goes from the edge color to the pure background color, yielding an image with only edges on the background.
The final lerp in the return statement blends the results of the first two using _EdgeOnly: as _EdgeOnly goes from 0 to 1, the output changes from the source image with edges superimposed to an image showing only the edges.
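To make this concrete, here is a small C# sketch (illustration only; the class SobelLerpCheck and method Blend are hypothetical helpers, not part of the effect) that evaluates the same three lerps on the CPU with Unity's Color.Lerp:

using UnityEngine;

public static class SobelLerpCheck {
    // Mirrors the fragment shader's blending; edge near 0 means "on an edge".
    // Note: Color.Lerp clamps t to [0, 1] while the shader's lerp does not,
    // but for t in [0, 1] the two agree.
    public static Color Blend(Color source, Color edgeColor, Color backgroundColor,
                              float edge, float edgeOnly) {
        Color withEdgeColor = Color.Lerp(edgeColor, source, edge);
        Color onlyEdgeColor = Color.Lerp(edgeColor, backgroundColor, edge);
        return Color.Lerp(withEdgeColor, onlyEdgeColor, edgeOnly);
    }
}

// Spot checks:
// edge = 0 (edge pixel):    result is edgeColor, whatever edgeOnly is
// edge = 1, edgeOnly = 0:   result is the source pixel (original image)
// edge = 1, edgeOnly = 1:   result is backgroundColor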
3. After writing the code, return to the editor, select the camera with the EdgeDetection.cs script attached, and in the Inspector drag the Edge Detection shader onto the edgeDetectShader public field.
The resulting effect is as follows: