暂无描述
您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

(extraction artifact: source-view line-number gutter; no document content)
  1. #ifndef UNIVERSAL_SSAO_INCLUDED
  2. #define UNIVERSAL_SSAO_INCLUDED
  3. // Includes
  4. #include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
  5. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.hlsl"
  6. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
  7. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareNormalsTexture.hlsl"
  8. #include "Packages/com.unity.render-pipelines.core/ShaderLibrary/FoveatedRendering.hlsl"
  9. // Textures & Samplers
  10. TEXTURE2D_HALF(_BlueNoiseTexture);
  11. TEXTURE2D_X_HALF(_ScreenSpaceOcclusionTexture);
  12. SAMPLER(sampler_BlitTexture);
  13. // Params
  14. half4 _BlurOffset;
  15. half4 _SSAOParams;
  16. float4 _CameraViewTopLeftCorner[2];
  17. float4x4 _CameraViewProjections[2]; // This is different from UNITY_MATRIX_VP (platform-agnostic projection matrix is used). Handle both non-XR and XR modes.
  18. float4 _SourceSize;
  19. float4 _ProjectionParams2;
  20. float4 _CameraViewXExtent[2];
  21. float4 _CameraViewYExtent[2];
  22. float4 _CameraViewZExtent[2];
  23. // SSAO Settings
  24. #define INTENSITY _SSAOParams.x
  25. #define RADIUS _SSAOParams.y
  26. #define DOWNSAMPLE _SSAOParams.z
  27. #define FALLOFF _SSAOParams.w
  28. #if defined(_BLUE_NOISE)
  29. half4 _SSAOBlueNoiseParams;
  30. #define BlueNoiseScale _SSAOBlueNoiseParams.xy
  31. #define BlueNoiseOffset _SSAOBlueNoiseParams.zw
  32. #endif
  33. #if defined(_SAMPLE_COUNT_HIGH)
  34. static const int SAMPLE_COUNT = 12;
  35. #elif defined(_SAMPLE_COUNT_MEDIUM)
  36. static const int SAMPLE_COUNT = 8;
  37. #else
  38. static const int SAMPLE_COUNT = 4;
  39. #endif
  40. // Hardcoded random UV values that improves performance.
  41. // The values were taken from this function:
  42. // r = frac(43758.5453 * sin( dot(float2(12.9898, 78.233), uv)) ));
  43. // Indices 0 to 19 are for u = 0.0
  44. // Indices 20 to 39 are for u = 1.0
  45. static half SSAORandomUV[40] =
  46. {
  47. 0.00000000, // 00
  48. 0.33984375, // 01
  49. 0.75390625, // 02
  50. 0.56640625, // 03
  51. 0.98437500, // 04
  52. 0.07421875, // 05
  53. 0.23828125, // 06
  54. 0.64062500, // 07
  55. 0.35937500, // 08
  56. 0.50781250, // 09
  57. 0.38281250, // 10
  58. 0.98437500, // 11
  59. 0.17578125, // 12
  60. 0.53906250, // 13
  61. 0.28515625, // 14
  62. 0.23137260, // 15
  63. 0.45882360, // 16
  64. 0.54117650, // 17
  65. 0.12941180, // 18
  66. 0.64313730, // 19
  67. 0.92968750, // 20
  68. 0.76171875, // 21
  69. 0.13333330, // 22
  70. 0.01562500, // 23
  71. 0.00000000, // 24
  72. 0.10546875, // 25
  73. 0.64062500, // 26
  74. 0.74609375, // 27
  75. 0.67968750, // 28
  76. 0.35156250, // 29
  77. 0.49218750, // 30
  78. 0.12500000, // 31
  79. 0.26562500, // 32
  80. 0.62500000, // 33
  81. 0.44531250, // 34
  82. 0.17647060, // 35
  83. 0.44705890, // 36
  84. 0.93333340, // 37
  85. 0.87058830, // 38
  86. 0.56862750, // 39
  87. };
  88. // Function defines
  89. #define SCREEN_PARAMS GetScaledScreenParams()
  90. #define SAMPLE_BASEMAP(uv) half4(SAMPLE_TEXTURE2D_X(_BlitTexture, sampler_BlitTexture, UnityStereoTransformScreenSpaceTex(uv)));
  91. #define SAMPLE_BASEMAP_R(uv) half(SAMPLE_TEXTURE2D_X(_BlitTexture, sampler_BlitTexture, UnityStereoTransformScreenSpaceTex(uv)).r);
  92. #define SAMPLE_BLUE_NOISE(uv) SAMPLE_TEXTURE2D(_BlueNoiseTexture, sampler_PointRepeat, UnityStereoTransformScreenSpaceTex(uv)).a;
  93. // Constants
  94. // kContrast determines the contrast of occlusion. This allows users to control over/under
  95. // occlusion. At the moment, this is not exposed to the editor because it's rarely useful.
  96. // The range is between 0 and 1.
  97. static const half kContrast = half(0.6);
  98. // The constant below controls the geometry-awareness of the bilateral
  99. // filter. The higher value, the more sensitive it is.
  100. static const half kGeometryCoeff = half(0.8);
  101. // The constants below are used in the AO estimator. Beta is mainly used for suppressing
  102. // self-shadowing noise, and Epsilon is used to prevent calculation underflow. See the paper
  103. // (Morgan 2011 https://casual-effects.com/research/McGuire2011AlchemyAO/index.html)
  104. // for further details of these constants.
  105. static const half kBeta = half(0.004);
  106. static const half kEpsilon = half(0.0001);
  107. static const float SKY_DEPTH_VALUE = 0.00001;
  108. static const half HALF_POINT_ONE = half(0.1);
  109. static const half HALF_MINUS_ONE = half(-1.0);
  110. static const half HALF_ZERO = half(0.0);
  111. static const half HALF_HALF = half(0.5);
  112. static const half HALF_ONE = half(1.0);
  113. static const half4 HALF4_ONE = half4(1.0, 1.0, 1.0, 1.0);
  114. static const half HALF_TWO = half(2.0);
  115. static const half HALF_TWO_PI = half(6.28318530717958647693);
  116. static const half HALF_FOUR = half(4.0);
  117. static const half HALF_NINE = half(9.0);
  118. static const half HALF_HUNDRED = half(100.0);
  119. #if defined(USING_STEREO_MATRICES)
  120. #define unity_eyeIndex unity_StereoEyeIndex
  121. #else
  122. #define unity_eyeIndex 0
  123. #endif
  124. half4 PackAONormal(half ao, half3 n)
  125. {
  126. n *= HALF_HALF;
  127. n += HALF_HALF;
  128. return half4(ao, n);
  129. }
  130. half3 GetPackedNormal(half4 p)
  131. {
  132. return p.gba * HALF_TWO - HALF_ONE;
  133. }
  134. half GetPackedAO(half4 p)
  135. {
  136. return p.r;
  137. }
  138. half EncodeAO(half x)
  139. {
  140. #if UNITY_COLORSPACE_GAMMA
  141. return half(1.0 - max(LinearToSRGB(1.0 - saturate(x)), 0.0));
  142. #else
  143. return x;
  144. #endif
  145. }
  146. half CompareNormal(half3 d1, half3 d2)
  147. {
  148. return smoothstep(kGeometryCoeff, HALF_ONE, dot(d1, d2));
  149. }
  150. float2 GetScreenSpacePosition(float2 uv)
  151. {
  152. return float2(uv * SCREEN_PARAMS.xy * DOWNSAMPLE);
  153. }
  154. // Pseudo random number generator
  155. half GetRandomVal(half u, half sampleIndex)
  156. {
  157. return SSAORandomUV[u * 20 + sampleIndex];
  158. }
  159. // Sample point picker
  160. half3 PickSamplePoint(float2 uv, int sampleIndex, half sampleIndexHalf, half rcpSampleCount, half3 normal_o, float2 pixelDensity)
  161. {
  162. #if defined(_BLUE_NOISE)
  163. const half lerpVal = sampleIndexHalf * rcpSampleCount;
  164. const half noise = SAMPLE_BLUE_NOISE(((uv + BlueNoiseOffset) * BlueNoiseScale) + lerpVal);
  165. const half u = frac(GetRandomVal(HALF_ZERO, sampleIndexHalf).x + noise) * HALF_TWO - HALF_ONE;
  166. const half theta = (GetRandomVal(HALF_ONE, sampleIndexHalf).x + noise) * HALF_TWO_PI * HALF_HUNDRED;
  167. const half u2 = half(sqrt(HALF_ONE - u * u));
  168. half3 v = half3(u2 * cos(theta), u2 * sin(theta), u);
  169. v *= (dot(normal_o, v) >= HALF_ZERO) * HALF_TWO - HALF_ONE;
  170. v *= lerp(0.1, 1.0, lerpVal * lerpVal);
  171. #else
  172. const float2 positionSS = GetScreenSpacePosition(uv);
  173. const half noise = half(InterleavedGradientNoise(positionSS, sampleIndex));
  174. const half u = frac(GetRandomVal(HALF_ZERO, sampleIndex) + noise) * HALF_TWO - HALF_ONE;
  175. const half theta = (GetRandomVal(HALF_ONE, sampleIndex) + noise) * HALF_TWO_PI;
  176. const half u2 = half(sqrt(HALF_ONE - u * u));
  177. half3 v = half3(u2 * cos(theta), u2 * sin(theta), u);
  178. v *= sqrt((sampleIndexHalf + HALF_ONE) * rcpSampleCount);
  179. v = faceforward(v, -normal_o, v);
  180. #endif
  181. v *= RADIUS;
  182. v.xy *= pixelDensity;
  183. return v;
  184. }
  185. // For Downsampled SSAO we need to adjust the UV coordinates
  186. // so it hits the center of the pixel inside the depth texture.
  187. // The texelSize multiplier is 1.0 when DOWNSAMPLE is enabled, otherwise 0.0
  188. #define ADJUSTED_DEPTH_UV(uv) uv.xy + ((_CameraDepthTexture_TexelSize.xy * 0.5) * (1.0 - (DOWNSAMPLE - 0.5) * 2.0))
  189. float SampleDepth(float2 uv)
  190. {
  191. return SampleSceneDepth(ADJUSTED_DEPTH_UV(uv.xy));
  192. }
  193. float GetLinearEyeDepth(float rawDepth)
  194. {
  195. #if defined(_ORTHOGRAPHIC)
  196. return LinearDepthToEyeDepth(rawDepth);
  197. #else
  198. return LinearEyeDepth(rawDepth, _ZBufferParams);
  199. #endif
  200. }
  201. float SampleAndGetLinearEyeDepth(float2 uv)
  202. {
  203. const float rawDepth = SampleDepth(uv);
  204. return GetLinearEyeDepth(rawDepth);
  205. }
  206. // This returns a vector in world unit (not a position), from camera to the given point described by uv screen coordinate and depth (in absolute world unit).
  207. half3 ReconstructViewPos(float2 uv, float linearDepth)
  208. {
  209. #if defined(SUPPORTS_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  210. UNITY_BRANCH if (_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  211. {
  212. uv = RemapFoveatedRenderingNonUniformToLinear(uv);
  213. }
  214. #endif
  215. // Screen is y-inverted.
  216. uv.y = 1.0 - uv.y;
  217. // view pos in world space
  218. #if defined(_ORTHOGRAPHIC)
  219. float zScale = linearDepth * _ProjectionParams.w; // divide by far plane
  220. float3 viewPos = _CameraViewTopLeftCorner[unity_eyeIndex].xyz
  221. + _CameraViewXExtent[unity_eyeIndex].xyz * uv.x
  222. + _CameraViewYExtent[unity_eyeIndex].xyz * uv.y
  223. + _CameraViewZExtent[unity_eyeIndex].xyz * zScale;
  224. #else
  225. float zScale = linearDepth * _ProjectionParams2.x; // divide by near plane
  226. float3 viewPos = _CameraViewTopLeftCorner[unity_eyeIndex].xyz
  227. + _CameraViewXExtent[unity_eyeIndex].xyz * uv.x
  228. + _CameraViewYExtent[unity_eyeIndex].xyz * uv.y;
  229. viewPos *= zScale;
  230. #endif
  231. return half3(viewPos);
  232. }
  233. // Try reconstructing normal accurately from depth buffer.
  234. // Low: DDX/DDY on the current pixel
  235. // Medium: 3 taps on each direction | x | * | y |
  236. // High: 5 taps on each direction: | z | x | * | y | w |
  237. // https://atyuwen.github.io/posts/normal-reconstruction/
  238. // https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/
  239. half3 ReconstructNormal(float2 uv, float linearDepth, float3 vpos, float2 pixelDensity)
  240. {
  241. #if defined(_SOURCE_DEPTH_LOW)
  242. return half3(normalize(cross(ddy(vpos), ddx(vpos))));
  243. #else
  244. float2 delta = float2(_SourceSize.zw * 2.0);
  245. pixelDensity = rcp(pixelDensity);
  246. // Sample the neighbour fragments
  247. float2 lUV = float2(-delta.x, 0.0) * pixelDensity;
  248. float2 rUV = float2(delta.x, 0.0) * pixelDensity;
  249. float2 uUV = float2(0.0, delta.y) * pixelDensity;
  250. float2 dUV = float2(0.0, -delta.y) * pixelDensity;
  251. float3 l1 = float3(uv + lUV, 0.0); l1.z = SampleAndGetLinearEyeDepth(l1.xy); // Left1
  252. float3 r1 = float3(uv + rUV, 0.0); r1.z = SampleAndGetLinearEyeDepth(r1.xy); // Right1
  253. float3 u1 = float3(uv + uUV, 0.0); u1.z = SampleAndGetLinearEyeDepth(u1.xy); // Up1
  254. float3 d1 = float3(uv + dUV, 0.0); d1.z = SampleAndGetLinearEyeDepth(d1.xy); // Down1
  255. // Determine the closest horizontal and vertical pixels...
  256. // horizontal: left = 0.0 right = 1.0
  257. // vertical : down = 0.0 up = 1.0
  258. #if defined(_SOURCE_DEPTH_MEDIUM)
  259. uint closest_horizontal = l1.z > r1.z ? 0 : 1;
  260. uint closest_vertical = d1.z > u1.z ? 0 : 1;
  261. #else
  262. float3 l2 = float3(uv + lUV * 2.0, 0.0); l2.z = SampleAndGetLinearEyeDepth(l2.xy); // Left2
  263. float3 r2 = float3(uv + rUV * 2.0, 0.0); r2.z = SampleAndGetLinearEyeDepth(r2.xy); // Right2
  264. float3 u2 = float3(uv + uUV * 2.0, 0.0); u2.z = SampleAndGetLinearEyeDepth(u2.xy); // Up2
  265. float3 d2 = float3(uv + dUV * 2.0, 0.0); d2.z = SampleAndGetLinearEyeDepth(d2.xy); // Down2
  266. const uint closest_horizontal = abs( (2.0 * l1.z - l2.z) - linearDepth) < abs( (2.0 * r1.z - r2.z) - linearDepth) ? 0 : 1;
  267. const uint closest_vertical = abs( (2.0 * d1.z - d2.z) - linearDepth) < abs( (2.0 * u1.z - u2.z) - linearDepth) ? 0 : 1;
  268. #endif
  269. // Calculate the triangle, in a counter-clockwize order, to
  270. // use based on the closest horizontal and vertical depths.
  271. // h == 0.0 && v == 0.0: p1 = left, p2 = down
  272. // h == 1.0 && v == 0.0: p1 = down, p2 = right
  273. // h == 1.0 && v == 1.0: p1 = right, p2 = up
  274. // h == 0.0 && v == 1.0: p1 = up, p2 = left
  275. // Calculate the view space positions for the three points...
  276. half3 P1;
  277. half3 P2;
  278. if (closest_vertical == 0)
  279. {
  280. P1 = half3(closest_horizontal == 0 ? l1 : d1);
  281. P2 = half3(closest_horizontal == 0 ? d1 : r1);
  282. }
  283. else
  284. {
  285. P1 = half3(closest_horizontal == 0 ? u1 : r1);
  286. P2 = half3(closest_horizontal == 0 ? l1 : u1);
  287. }
  288. // Use the cross product to calculate the normal...
  289. return half3(normalize(cross(ReconstructViewPos(P2.xy, P2.z) - vpos, ReconstructViewPos(P1.xy, P1.z) - vpos)));
  290. #endif
  291. }
  292. half3 SampleNormal(float2 uv, float linearDepth, float2 pixelDensity)
  293. {
  294. #if defined(_SOURCE_DEPTH_NORMALS)
  295. return half3(SampleSceneNormals(uv));
  296. #else
  297. float3 vpos = ReconstructViewPos(uv, linearDepth);
  298. return ReconstructNormal(uv, linearDepth, vpos, pixelDensity);
  299. #endif
  300. }
  301. // Distance-based AO estimator based on Morgan 2011
  302. // "Alchemy screen-space ambient obscurance algorithm"
  303. // http://graphics.cs.williams.edu/papers/AlchemyHPG11/
  304. half4 SSAO(Varyings input) : SV_Target
  305. {
  306. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  307. float2 uv = input.texcoord;
  308. // Early Out for Sky...
  309. float rawDepth_o = SampleDepth(uv);
  310. if (rawDepth_o < SKY_DEPTH_VALUE)
  311. return PackAONormal(HALF_ZERO, HALF_ZERO);
  312. // Early Out for Falloff
  313. float linearDepth_o = GetLinearEyeDepth(rawDepth_o);
  314. half halfLinearDepth_o = half(linearDepth_o);
  315. if (halfLinearDepth_o > FALLOFF)
  316. return PackAONormal(HALF_ZERO, HALF_ZERO);
  317. float2 pixelDensity;
  318. #if defined(SUPPORTS_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  319. UNITY_BRANCH if (_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  320. {
  321. pixelDensity = RemapFoveatedRenderingDensity(RemapFoveatedRenderingNonUniformToLinear(uv));
  322. }
  323. else
  324. #endif
  325. {
  326. pixelDensity = float2(1.0f, 1.0f);
  327. }
  328. // Normal for this fragment
  329. half3 normal_o = SampleNormal(uv, linearDepth_o, pixelDensity);
  330. // View position for this fragment
  331. float3 vpos_o = ReconstructViewPos(uv, linearDepth_o);
  332. // Parameters used in coordinate conversion
  333. half3 camTransform000102 = half3(_CameraViewProjections[unity_eyeIndex]._m00, _CameraViewProjections[unity_eyeIndex]._m01, _CameraViewProjections[unity_eyeIndex]._m02);
  334. half3 camTransform101112 = half3(_CameraViewProjections[unity_eyeIndex]._m10, _CameraViewProjections[unity_eyeIndex]._m11, _CameraViewProjections[unity_eyeIndex]._m12);
  335. const half rcpSampleCount = half(rcp(SAMPLE_COUNT));
  336. half ao = HALF_ZERO;
  337. half sHalf = HALF_MINUS_ONE;
  338. UNITY_UNROLL
  339. for (int s = 0; s < SAMPLE_COUNT; s++)
  340. {
  341. sHalf += HALF_ONE;
  342. // Sample point
  343. half3 v_s1 = PickSamplePoint(uv, s, sHalf, rcpSampleCount, normal_o, pixelDensity);
  344. half3 vpos_s1 = half3(vpos_o + v_s1);
  345. half2 spos_s1 = half2(
  346. camTransform000102.x * vpos_s1.x + camTransform000102.y * vpos_s1.y + camTransform000102.z * vpos_s1.z,
  347. camTransform101112.x * vpos_s1.x + camTransform101112.y * vpos_s1.y + camTransform101112.z * vpos_s1.z
  348. );
  349. half zDist = HALF_ZERO;
  350. #if defined(_ORTHOGRAPHIC)
  351. zDist = halfLinearDepth_o;
  352. half2 uv_s1_01 = saturate((spos_s1 + HALF_ONE) * HALF_HALF);
  353. #else
  354. zDist = half(-dot(UNITY_MATRIX_V[2].xyz, vpos_s1));
  355. half2 uv_s1_01 = saturate(half2(spos_s1 * rcp(zDist) + HALF_ONE) * HALF_HALF);
  356. #endif
  357. #if defined(SUPPORTS_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  358. UNITY_BRANCH if (_FOVEATED_RENDERING_NON_UNIFORM_RASTER)
  359. {
  360. uv_s1_01 = RemapFoveatedRenderingLinearToNonUniform(uv_s1_01);
  361. }
  362. #endif
  363. // Relative depth of the sample point
  364. float rawDepth_s = SampleDepth(uv_s1_01);
  365. float linearDepth_s = GetLinearEyeDepth(rawDepth_s);
  366. // We need to make sure we not use the AO value if the sample point it's outside the radius or if it's the sky...
  367. half halfLinearDepth_s = half(linearDepth_s);
  368. half isInsideRadius = abs(zDist - halfLinearDepth_s) < RADIUS ? 1.0 : 0.0;
  369. isInsideRadius *= rawDepth_s > SKY_DEPTH_VALUE ? 1.0 : 0.0;
  370. // Relative postition of the sample point
  371. half3 v_s2 = half3(ReconstructViewPos(uv_s1_01, linearDepth_s) - vpos_o);
  372. // Estimate the obscurance value
  373. half dotVal = dot(v_s2, normal_o) - kBeta * halfLinearDepth_o;
  374. half a1 = max(dotVal, HALF_ZERO);
  375. half a2 = dot(v_s2, v_s2) + kEpsilon;
  376. ao += a1 * rcp(a2) * isInsideRadius;
  377. }
  378. // Intensity normalization
  379. ao *= RADIUS;
  380. // Calculate falloff...
  381. half falloff = HALF_ONE - halfLinearDepth_o * half(rcp(FALLOFF));
  382. falloff = falloff*falloff;
  383. // Apply contrast + intensity + falloff^2
  384. ao = PositivePow(saturate(ao * INTENSITY * falloff * rcpSampleCount), kContrast);
  385. // Return the packed ao + normals
  386. return PackAONormal(ao, normal_o);
  387. }
  388. // ------------------------------------------------------------------
  389. // Bilateral Blur
  390. // ------------------------------------------------------------------
  391. // Geometry-aware separable bilateral filter
  392. half4 Blur(const float2 uv, const float2 delta) : SV_Target
  393. {
  394. half4 p0 = SAMPLE_BASEMAP(uv );
  395. half4 p1a = SAMPLE_BASEMAP(uv - delta * 1.3846153846);
  396. half4 p1b = SAMPLE_BASEMAP(uv + delta * 1.3846153846);
  397. half4 p2a = SAMPLE_BASEMAP(uv - delta * 3.2307692308);
  398. half4 p2b = SAMPLE_BASEMAP(uv + delta * 3.2307692308);
  399. half3 n0 = GetPackedNormal(p0);
  400. half w0 = half(0.2270270270);
  401. half w1a = CompareNormal(n0, GetPackedNormal(p1a)) * half(0.3162162162);
  402. half w1b = CompareNormal(n0, GetPackedNormal(p1b)) * half(0.3162162162);
  403. half w2a = CompareNormal(n0, GetPackedNormal(p2a)) * half(0.0702702703);
  404. half w2b = CompareNormal(n0, GetPackedNormal(p2b)) * half(0.0702702703);
  405. half s = half(0.0);
  406. s += GetPackedAO(p0) * w0;
  407. s += GetPackedAO(p1a) * w1a;
  408. s += GetPackedAO(p1b) * w1b;
  409. s += GetPackedAO(p2a) * w2a;
  410. s += GetPackedAO(p2b) * w2b;
  411. s *= rcp(w0 + w1a + w1b + w2a + w2b);
  412. return PackAONormal(s, n0);
  413. }
  414. // Geometry-aware bilateral filter (single pass/small kernel)
  415. half BlurSmall(const float2 uv, const float2 delta)
  416. {
  417. half4 p0 = SAMPLE_BASEMAP(uv );
  418. half4 p1 = SAMPLE_BASEMAP(uv + float2(-delta.x, -delta.y));
  419. half4 p2 = SAMPLE_BASEMAP(uv + float2( delta.x, -delta.y));
  420. half4 p3 = SAMPLE_BASEMAP(uv + float2(-delta.x, delta.y));
  421. half4 p4 = SAMPLE_BASEMAP(uv + float2( delta.x, delta.y));
  422. half3 n0 = GetPackedNormal(p0);
  423. half w0 = HALF_ONE;
  424. half w1 = CompareNormal(n0, GetPackedNormal(p1));
  425. half w2 = CompareNormal(n0, GetPackedNormal(p2));
  426. half w3 = CompareNormal(n0, GetPackedNormal(p3));
  427. half w4 = CompareNormal(n0, GetPackedNormal(p4));
  428. half s = HALF_ZERO;
  429. s += GetPackedAO(p0) * w0;
  430. s += GetPackedAO(p1) * w1;
  431. s += GetPackedAO(p2) * w2;
  432. s += GetPackedAO(p3) * w3;
  433. s += GetPackedAO(p4) * w4;
  434. return s *= rcp(w0 + w1 + w2 + w3 + w4);
  435. }
  436. half4 HorizontalBlur(Varyings input) : SV_Target
  437. {
  438. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  439. const float2 uv = input.texcoord;
  440. const float2 delta = float2(_SourceSize.z * rcp(DOWNSAMPLE), 0.0);
  441. return Blur(uv, delta);
  442. }
  443. half4 VerticalBlur(Varyings input) : SV_Target
  444. {
  445. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  446. const float2 uv = input.texcoord;
  447. const float2 delta = float2(0.0, _SourceSize.w * rcp(DOWNSAMPLE));
  448. return Blur(uv, delta);
  449. }
  450. half4 FinalBlur(Varyings input) : SV_Target
  451. {
  452. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  453. const float2 uv = input.texcoord;
  454. const float2 delta = _SourceSize.zw * rcp(DOWNSAMPLE);
  455. return HALF_ONE - BlurSmall(uv, delta );
  456. }
  457. // ------------------------------------------------------------------
  458. // Gaussian Blur
  459. // ------------------------------------------------------------------
  460. // https://software.intel.com/content/www/us/en/develop/blogs/an-investigation-of-fast-real-time-gpu-based-image-blur-algorithms.html
  461. half GaussianBlur(half2 uv, half2 pixelOffset)
  462. {
  463. half colOut = 0;
  464. // Kernel width 7 x 7
  465. const int stepCount = 2;
  466. const half gWeights[stepCount] ={
  467. 0.44908,
  468. 0.05092
  469. };
  470. const half gOffsets[stepCount] ={
  471. 0.53805,
  472. 2.06278
  473. };
  474. UNITY_UNROLL
  475. for( int i = 0; i < stepCount; i++ )
  476. {
  477. half2 texCoordOffset = gOffsets[i] * pixelOffset;
  478. half4 p1 = SAMPLE_BASEMAP(uv + texCoordOffset);
  479. half4 p2 = SAMPLE_BASEMAP(uv - texCoordOffset);
  480. half col = p1.r + p2.r;
  481. colOut += gWeights[i] * col;
  482. }
  483. return colOut;
  484. }
  485. half HorizontalGaussianBlur(Varyings input) : SV_Target
  486. {
  487. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  488. half2 uv = input.texcoord;
  489. half2 delta = half2(_SourceSize.z * rcp(DOWNSAMPLE), HALF_ZERO);
  490. return GaussianBlur(uv, delta);
  491. }
  492. half VerticalGaussianBlur(Varyings input) : SV_Target
  493. {
  494. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  495. half2 uv = input.texcoord;
  496. half2 delta = half2(HALF_ZERO, _SourceSize.w * rcp(DOWNSAMPLE));
  497. return HALF_ONE - GaussianBlur(uv, delta);
  498. }
  499. // ------------------------------------------------------------------
  500. // Kawase Blur
  501. // ------------------------------------------------------------------
  502. ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
  503. // Developed by Masaki Kawase, Bunkasha Games
  504. // Used in DOUBLE-S.T.E.A.L. (aka Wreckless)
  505. // From his GDC2003 Presentation: Frame Buffer Postprocessing Effects in DOUBLE-S.T.E.A.L (Wreckless)
  506. ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
  507. half KawaseBlurFilter( half2 texCoord, half2 pixelSize, half iteration )
  508. {
  509. half2 texCoordSample;
  510. half2 halfPixelSize = pixelSize * HALF_HALF;
  511. half2 dUV = ( pixelSize.xy * half2( iteration, iteration ) ) + halfPixelSize.xy;
  512. half cOut;
  513. // Sample top left pixel
  514. texCoordSample.x = texCoord.x - dUV.x;
  515. texCoordSample.y = texCoord.y + dUV.y;
  516. cOut = SAMPLE_BASEMAP_R(texCoordSample);
  517. // Sample top right pixel
  518. texCoordSample.x = texCoord.x + dUV.x;
  519. texCoordSample.y = texCoord.y + dUV.y;
  520. cOut += SAMPLE_BASEMAP_R(texCoordSample);
  521. // Sample bottom right pixel
  522. texCoordSample.x = texCoord.x + dUV.x;
  523. texCoordSample.y = texCoord.y - dUV.y;
  524. cOut += SAMPLE_BASEMAP_R(texCoordSample);
  525. // Sample bottom left pixel
  526. texCoordSample.x = texCoord.x - dUV.x;
  527. texCoordSample.y = texCoord.y - dUV.y;
  528. cOut += SAMPLE_BASEMAP_R(texCoordSample);
  529. // Average
  530. cOut *= half(0.25);
  531. return cOut;
  532. }
  533. half KawaseBlur(Varyings input) : SV_Target
  534. {
  535. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  536. half2 uv = input.texcoord;
  537. half2 texelSize = _SourceSize.zw * rcp(DOWNSAMPLE);
  538. half col = KawaseBlurFilter(uv, texelSize, 0);
  539. col = HALF_ONE - col;
  540. return col;
  541. }
  542. #endif //UNIVERSAL_SSAO_INCLUDED