
ShaderVariablesFunctions.hlsl 18KB

#ifndef UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED
#define UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED

#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.deprecated.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Debug/DebuggingCommon.hlsl"

// Computes world, view, clip and NDC space positions from an object space vertex position.
VertexPositionInputs GetVertexPositionInputs(float3 positionOS)
{
    VertexPositionInputs input;
    input.positionWS = TransformObjectToWorld(positionOS);
    input.positionVS = TransformWorldToView(input.positionWS);
    input.positionCS = TransformWorldToHClip(input.positionWS);

    float4 ndc = input.positionCS * 0.5f;
    input.positionNDC.xy = float2(ndc.x, ndc.y * _ProjectionParams.x) + ndc.w;
    input.positionNDC.zw = input.positionCS.zw;

    return input;
}
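// Example usage (illustrative sketch, not part of the original URP source): a minimal vertex
// function that fills common interpolators from GetVertexPositionInputs. The Attributes and
// Varyings struct names and fields below are assumptions made for the example.
//
// struct Attributes { float4 positionOS : POSITION; };
// struct Varyings
// {
//     float4 positionCS  : SV_POSITION;
//     float3 positionWS  : TEXCOORD0;
//     float4 positionNDC : TEXCOORD1;
// };
//
// Varyings ExampleVert(Attributes input)
// {
//     Varyings output;
//     VertexPositionInputs vertexInput = GetVertexPositionInputs(input.positionOS.xyz);
//     output.positionCS = vertexInput.positionCS;
//     output.positionWS = vertexInput.positionWS;
//     output.positionNDC = vertexInput.positionNDC; // divide xy by w in the fragment stage for screen UVs
//     return output;
// }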
// Builds a default world space TBN when no tangent is available; only the normal is transformed.
VertexNormalInputs GetVertexNormalInputs(float3 normalOS)
{
    VertexNormalInputs tbn;
    tbn.tangentWS = real3(1.0, 0.0, 0.0);
    tbn.bitangentWS = real3(0.0, 1.0, 0.0);
    tbn.normalWS = TransformObjectToWorldNormal(normalOS);
    return tbn;
}

VertexNormalInputs GetVertexNormalInputs(float3 normalOS, float4 tangentOS)
{
    VertexNormalInputs tbn;

    // mikkts space compliant. only normalize when extracting normal at frag.
    real sign = real(tangentOS.w) * GetOddNegativeScale();
    tbn.normalWS = TransformObjectToWorldNormal(normalOS);
    tbn.tangentWS = real3(TransformObjectToWorldDir(tangentOS.xyz));
    tbn.bitangentWS = real3(cross(tbn.normalWS, float3(tbn.tangentWS))) * sign;
    return tbn;
}
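// Example usage (illustrative sketch, not part of the original URP source): building the world
// space TBN from mesh attributes so a tangent space normal map sample can later be transformed
// to world space. The helper name and out parameters are assumptions made for the example.
//
// void ExampleBuildTBN(float3 normalOS, float4 tangentOS,
//                      out half3 normalWS, out half3 tangentWS, out half3 bitangentWS)
// {
//     VertexNormalInputs normalInput = GetVertexNormalInputs(normalOS, tangentOS);
//     normalWS = normalInput.normalWS;
//     tangentWS = normalInput.tangentWS;
//     bitangentWS = normalInput.bitangentWS;
//     // In the fragment shader, a sampled tangent space normal can then be transformed with
//     // TransformTangentToWorld(normalTS, half3x3(tangentWS, bitangentWS, normalWS)).
// }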
float4 GetScaledScreenParams()
{
    return _ScaledScreenParams;
}

// Returns 'true' if the current view performs a perspective projection.
bool IsPerspectiveProjection()
{
    return (unity_OrthoParams.w == 0);
}

float3 GetCameraPositionWS()
{
    // Currently we do not support Camera Relative Rendering, so
    // we simply return _WorldSpaceCameraPos until then.
    return _WorldSpaceCameraPos;

    // We will replace the code above with this one once
    // we start supporting Camera Relative Rendering.
    //#if (SHADEROPTIONS_CAMERA_RELATIVE_RENDERING != 0)
    //    return float3(0, 0, 0);
    //#else
    //    return _WorldSpaceCameraPos;
    //#endif
}

// Could be e.g. the position of a primary camera or a shadow-casting light.
float3 GetCurrentViewPosition()
{
    // Currently we do not support Camera Relative Rendering, so
    // we simply return the camera position until then.
    return GetCameraPositionWS();

    // We will replace the code above with this one once
    // we start supporting Camera Relative Rendering.
    //#if defined(SHADERPASS) && (SHADERPASS != SHADERPASS_SHADOWS)
    //    return GetCameraPositionWS();
    //#else
    //    // This is a generic solution.
    //    // However, for the primary camera, using '_WorldSpaceCameraPos' is better for cache locality,
    //    // and in case we enable camera-relative rendering, we can statically set the position to 0.
    //    return UNITY_MATRIX_I_V._14_24_34;
    //#endif
}

// Returns the forward (central) direction of the current view in world space.
float3 GetViewForwardDir()
{
    float4x4 viewMat = GetWorldToViewMatrix();
    return -viewMat[2].xyz;
}

// Computes the world space view direction (pointing towards the viewer).
float3 GetWorldSpaceViewDir(float3 positionWS)
{
    if (IsPerspectiveProjection())
    {
        // Perspective
        return GetCurrentViewPosition() - positionWS;
    }
    else
    {
        // Orthographic
        return -GetViewForwardDir();
    }
}

// Computes the normalized object space view direction (pointing towards the viewer).
half3 GetObjectSpaceNormalizeViewDir(float3 positionOS)
{
    if (IsPerspectiveProjection())
    {
        // Perspective
        float3 V = TransformWorldToObject(GetCurrentViewPosition()) - positionOS;
        return half3(normalize(V));
    }
    else
    {
        // Orthographic
        return half3(TransformWorldToObjectNormal(-GetViewForwardDir()));
    }
}

// Computes the normalized world space view direction (pointing towards the viewer).
half3 GetWorldSpaceNormalizeViewDir(float3 positionWS)
{
    if (IsPerspectiveProjection())
    {
        // Perspective
        float3 V = GetCurrentViewPosition() - positionWS;
        return half3(normalize(V));
    }
    else
    {
        // Orthographic
        return half3(-GetViewForwardDir());
    }
}
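// Example usage (illustrative sketch, not part of the original URP source): the normalized view
// direction is what lighting code typically consumes, e.g. when building a half vector for a
// specular term. The helper name and lightDirectionWS parameter are assumptions of the example.
//
// half3 ExampleHalfVector(float3 positionWS, half3 lightDirectionWS)
// {
//     half3 viewDirectionWS = GetWorldSpaceNormalizeViewDir(positionWS);
//     return normalize(lightDirectionWS + viewDirectionWS); // half vector for a Blinn-Phong / GGX style specular term
// }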
// UNITY_MATRIX_V defines a right-handed view space with the Z axis pointing towards the viewer.
// This function reverses the direction of the Z axis (so that it points forward),
// making the view space coordinate system left-handed.
void GetLeftHandedViewSpaceMatrices(out float4x4 viewMatrix, out float4x4 projMatrix)
{
    viewMatrix = UNITY_MATRIX_V;
    viewMatrix._31_32_33_34 = -viewMatrix._31_32_33_34;

    projMatrix = UNITY_MATRIX_P;
    projMatrix._13_23_33_43 = -projMatrix._13_23_33_43;
}

// Constants that represent material surface types
//
// These are expected to align with the commonly used "_Surface" material property
static const half kSurfaceTypeOpaque = 0.0;
static const half kSurfaceTypeTransparent = 1.0;

// Returns true if the input value represents an opaque surface
bool IsSurfaceTypeOpaque(half surfaceType)
{
    return (surfaceType == kSurfaceTypeOpaque);
}

// Returns true if the input value represents a transparent surface
bool IsSurfaceTypeTransparent(half surfaceType)
{
    return (surfaceType == kSurfaceTypeTransparent);
}

// Only define the alpha clipping helpers when the alpha test define is present.
// This should help identify usage errors early.
#if defined(_ALPHATEST_ON)

// Returns true if AlphaToMask functionality is currently available
// NOTE: This does NOT guarantee that AlphaToMask is enabled for the current draw. It only indicates that AlphaToMask functionality COULD be enabled for it.
//       In cases where AlphaToMask COULD be enabled, we export a specialized alpha value from the shader.
//       When AlphaToMask is enabled:     The specialized alpha value is combined with the sample mask
//       When AlphaToMask is not enabled: The specialized alpha value is either written into the framebuffer or dropped entirely depending on the color write mask
bool IsAlphaToMaskAvailable()
{
    return (_AlphaToMaskAvailable != 0.0);
}

// Returns a sharpened alpha value for use with alpha to coverage
// This function behaves correctly in cases where alpha and cutoff are constant values (degenerate usage of alpha clipping)
half SharpenAlphaStrict(half alpha, half alphaClipThreshold)
{
    half dAlpha = fwidth(alpha);
    return saturate(((alpha - alphaClipThreshold - (0.5 * dAlpha)) / max(dAlpha, 0.0001)) + 1.0);
}

// When AlphaToMask is available:     Returns a modified alpha value that should be exported from the shader so it can be combined with the sample mask
// When AlphaToMask is not available: Terminates the current invocation if the alpha value is below the cutoff and returns the input alpha value otherwise
half AlphaClip(half alpha, half cutoff)
{
    bool a2c = IsAlphaToMaskAvailable();

    // We explicitly detect cases where the alpha cutoff threshold is zero or below.
    // When this case occurs, we need to modify the alpha to coverage logic to avoid visual artifacts.
    bool zeroCutoff = (cutoff <= 0.0);

    // If the user has specified zero as the cutoff threshold, the expectation is that the shader will function as if alpha-clipping was disabled.
    // Ideally, the user should just turn off the alpha-clipping feature in this case, but in order to make this case work as expected, we force alpha
    // to 1.0 here to ensure that alpha-to-coverage never throws away samples when it's active. (That would cause opaque objects to appear transparent.)
    half alphaToCoverageAlpha = zeroCutoff ? 1.0 : SharpenAlphaStrict(alpha, cutoff);

    // When the alpha to coverage alpha is used for clipping, we subtract a small value from it to ensure that pixels with zero alpha exit early
    // rather than running the entire shader and then multiplying the sample coverage mask by zero which outputs nothing.
    half clipVal = (a2c && !zeroCutoff) ? (alphaToCoverageAlpha - 0.0001) : (alpha - cutoff);

    // When alpha-to-coverage is available:     Use the specialized value which will be exported from the shader and combined with the MSAA coverage mask.
    // When alpha-to-coverage is not available: Use the "clipped" value. A clipped value will always result in thread termination via the clip() logic below.
    half outputAlpha = a2c ? alphaToCoverageAlpha : alpha;

    clip(clipVal);

    return outputAlpha;
}

#endif
// Terminates the current invocation if the input alpha value is below the specified cutoff value and returns an updated alpha value otherwise.
// When provided, the offset value is added to the cutoff value during the comparison logic.
// The return value from this function should be exported as the final alpha value in fragment shaders so it can be combined with the MSAA coverage mask.
//
// When _ALPHATEST_ON is defined:     The returned value follows the behavior noted in the AlphaClip function
// When _ALPHATEST_ON is not defined: The returned value is equal to the original alpha input parameter
//
// NOTE: When _ALPHATEST_ON is not defined, this function is effectively a no-op.
real AlphaDiscard(real alpha, real cutoff, real offset = real(0.0))
{
#if defined(_ALPHATEST_ON)
    if (IsAlphaDiscardEnabled())
        alpha = AlphaClip(alpha, cutoff + offset);
#endif

    return alpha;
}

// Returns the alpha value that should be written to the framebuffer for the given surface type.
half OutputAlpha(half alpha, bool isTransparent)
{
    if (isTransparent)
    {
        return alpha;
    }
    else
    {
#if defined(_ALPHATEST_ON)
        // Opaque materials should always export an alpha value of 1.0 unless alpha-to-coverage is available
        return IsAlphaToMaskAvailable() ? alpha : 1.0;
#else
        return 1.0;
#endif
    }
}
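// Example usage (illustrative sketch, not part of the original URP source): a typical fragment
// shader flow for alpha handling. _BaseColor, _Cutoff and _Surface are common URP material
// properties; their presence here is an assumption of the example.
//
// half4 ExampleFragAlpha(half4 texColor)
// {
//     half alpha = texColor.a * _BaseColor.a;
//     alpha = AlphaDiscard(alpha, _Cutoff);            // clips, or returns an alpha-to-coverage-ready alpha
//     half3 color = texColor.rgb * _BaseColor.rgb;
//     return half4(color, OutputAlpha(alpha, IsSurfaceTypeTransparent(_Surface)));
// }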
half3 AlphaModulate(half3 albedo, half alpha)
{
    // Fake alpha for multiply blend by lerping albedo towards 1 (white) using alpha.
    // Manual adjustment for a "lighter" multiply effect (similar to "premultiplied alpha")
    // would be painting whiter pixels in the texture.
    // This emulates that procedure in the shader, so it should be applied to the base/source color.
#if defined(_ALPHAMODULATE_ON)
    return lerp(half3(1.0, 1.0, 1.0), albedo, alpha);
#else
    return albedo;
#endif
}

half3 AlphaPremultiply(half3 albedo, half alpha)
{
    // Multiply alpha into albedo only for the diffuse part of a Preserve Specular material.
    // A Preserve Specular material (glass like) has different alpha for diffuse and specular lighting.
    // Logically this is "variable" alpha blending.
    // (The HW blend mode is premultiply, but with the alpha multiply done in the shader.)
#if defined(_ALPHAPREMULTIPLY_ON)
    return albedo * alpha;
#endif
    return albedo;
}
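// Example usage (illustrative sketch, not part of the original URP source): the albedo is
// adjusted before lighting so the hardware blend mode (multiply or premultiplied alpha)
// produces the intended result. The helper name is an assumption of the example.
//
// half3 ExampleAdjustAlbedo(half3 albedo, half alpha)
// {
//     albedo = AlphaModulate(albedo, alpha);    // only changes albedo when _ALPHAMODULATE_ON is defined
//     albedo = AlphaPremultiply(albedo, alpha); // only changes albedo when _ALPHAPREMULTIPLY_ON is defined
//     return albedo;
// }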
// Normalization used to depend on SHADER_QUALITY
// Currently we always normalize to avoid lighting issues
// and platform inconsistencies.
half3 NormalizeNormalPerVertex(half3 normalWS)
{
    return normalize(normalWS);
}

float3 NormalizeNormalPerVertex(float3 normalWS)
{
    return normalize(normalWS);
}

half3 NormalizeNormalPerPixel(half3 normalWS)
{
    // With XYZ normal map encoding we sporadically sample normals with near-zero length, causing Inf/NaN
#if defined(UNITY_NO_DXT5nm) && defined(_NORMALMAP)
    return SafeNormalize(normalWS);
#else
    return normalize(normalWS);
#endif
}

float3 NormalizeNormalPerPixel(float3 normalWS)
{
#if defined(UNITY_NO_DXT5nm) && defined(_NORMALMAP)
    return SafeNormalize(normalWS);
#else
    return normalize(normalWS);
#endif
}

real ComputeFogFactorZ0ToFar(float z)
{
#if defined(FOG_LINEAR)
    // factor = (end-z)/(end-start) = z * (-1/(end-start)) + (end/(end-start))
    float fogFactor = saturate(z * unity_FogParams.z + unity_FogParams.w);
    return real(fogFactor);
#elif defined(FOG_EXP) || defined(FOG_EXP2)
    // factor = exp(-(density*z)^2)
    // -density * z computed at vertex
    return real(unity_FogParams.x * z);
#else
    return real(0.0);
#endif
}

// Computes the fog factor from a clip space depth value.
real ComputeFogFactor(float zPositionCS)
{
    float clipZ_0Far = UNITY_Z_0_FAR_FROM_CLIPSPACE(zPositionCS);
    return ComputeFogFactorZ0ToFar(clipZ_0Far);
}

half ComputeFogIntensity(half fogFactor)
{
    half fogIntensity = half(0.0);
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
    #if defined(FOG_EXP)
        // factor = exp(-density*z)
        // fogFactor = density*z computed at vertex
        fogIntensity = saturate(exp2(-fogFactor));
    #elif defined(FOG_EXP2)
        // factor = exp(-(density*z)^2)
        // fogFactor = density*z computed at vertex
        fogIntensity = saturate(exp2(-fogFactor * fogFactor));
    #elif defined(FOG_LINEAR)
        fogIntensity = fogFactor;
    #endif
#endif
    return fogIntensity;
}

// Force enable fog fragment shader evaluation
#define _FOG_FRAGMENT 1
real InitializeInputDataFog(float4 positionWS, real vertFogFactor)
{
    real fogFactor = 0.0;
#if defined(_FOG_FRAGMENT)
    #if (defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2))
        // Compiler eliminates unused math --> matrix.column_z * vec
        float viewZ = -(mul(UNITY_MATRIX_V, positionWS).z);
        // View Z is 0 at the camera position; remap 0 to the near plane.
        float nearToFarZ = max(viewZ - _ProjectionParams.y, 0);
        fogFactor = ComputeFogFactorZ0ToFar(nearToFarZ);
    #endif
#else
    fogFactor = vertFogFactor;
#endif
    return fogFactor;
}
float ComputeFogIntensity(float fogFactor)
{
    float fogIntensity = 0.0;
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
    #if defined(FOG_EXP)
        // factor = exp(-density*z)
        // fogFactor = density*z computed at vertex
        fogIntensity = saturate(exp2(-fogFactor));
    #elif defined(FOG_EXP2)
        // factor = exp(-(density*z)^2)
        // fogFactor = density*z computed at vertex
        fogIntensity = saturate(exp2(-fogFactor * fogFactor));
    #elif defined(FOG_LINEAR)
        fogIntensity = fogFactor;
    #endif
#endif
    return fogIntensity;
}
half3 MixFogColor(half3 fragColor, half3 fogColor, half fogFactor)
{
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
    half fogIntensity = ComputeFogIntensity(fogFactor);
    fragColor = lerp(fogColor, fragColor, fogIntensity);
#endif
    return fragColor;
}

float3 MixFogColor(float3 fragColor, float3 fogColor, float fogFactor)
{
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
    if (IsFogEnabled())
    {
        float fogIntensity = ComputeFogIntensity(fogFactor);
        fragColor = lerp(fogColor, fragColor, fogIntensity);
    }
#endif
    return fragColor;
}

half3 MixFog(half3 fragColor, half fogFactor)
{
    return MixFogColor(fragColor, half3(unity_FogColor.rgb), fogFactor);
}

float3 MixFog(float3 fragColor, float fogFactor)
{
    return MixFogColor(fragColor, unity_FogColor.rgb, fogFactor);
}
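// Example usage (illustrative sketch, not part of the original URP source): with _FOG_FRAGMENT
// forced on above, the fog factor is recomputed per pixel from the world position and the vertex
// factor passed in is ignored; the final color is then blended towards unity_FogColor. The helper
// name and parameters are assumptions of the example.
//
// half3 ExampleApplyFog(half3 color, float3 positionWS, half vertexFogFactor)
// {
//     half fogFactor = InitializeInputDataFog(float4(positionWS, 1.0), vertexFogFactor);
//     return MixFog(color, fogFactor);
// }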
// Converts a linear depth buffer value in [0, 1] (or [1, 0] with a reversed Z buffer) to an eye space depth in [near, far].
half LinearDepthToEyeDepth(half rawDepth)
{
#if UNITY_REVERSED_Z
    return half(_ProjectionParams.z - (_ProjectionParams.z - _ProjectionParams.y) * rawDepth);
#else
    return half(_ProjectionParams.y + (_ProjectionParams.z - _ProjectionParams.y) * rawDepth);
#endif
}

float LinearDepthToEyeDepth(float rawDepth)
{
#if UNITY_REVERSED_Z
    return _ProjectionParams.z - (_ProjectionParams.z - _ProjectionParams.y) * rawDepth;
#else
    return _ProjectionParams.y + (_ProjectionParams.z - _ProjectionParams.y) * rawDepth;
#endif
}

void TransformScreenUV(inout float2 uv, float screenHeight)
{
#if UNITY_UV_STARTS_AT_TOP
    uv.y = screenHeight - (uv.y * _ScaleBiasRt.x + _ScaleBiasRt.y * screenHeight);
#endif
}

void TransformScreenUV(inout float2 uv)
{
#if UNITY_UV_STARTS_AT_TOP
    TransformScreenUV(uv, GetScaledScreenParams().y);
#endif
}

void TransformNormalizedScreenUV(inout float2 uv)
{
#if UNITY_UV_STARTS_AT_TOP
    TransformScreenUV(uv, 1.0);
#endif
}

// Converts a pixel position (SV_Position.xy) into a normalized [0, 1] screen UV.
float2 GetNormalizedScreenSpaceUV(float2 positionCS)
{
    float2 normalizedScreenSpaceUV = positionCS.xy * rcp(GetScaledScreenParams().xy);
    TransformNormalizedScreenUV(normalizedScreenSpaceUV);
    return normalizedScreenSpaceUV;
}

float2 GetNormalizedScreenSpaceUV(float4 positionCS)
{
    return GetNormalizedScreenSpaceUV(positionCS.xy);
}
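// Example usage (illustrative sketch, not part of the original URP source): SV_Position from the
// rasterizer can be converted to a [0, 1] screen UV, e.g. for sampling a screen space texture.
// The _ExampleScreenTexture declaration and helper name are assumptions of the example.
//
// TEXTURE2D(_ExampleScreenTexture); SAMPLER(sampler_ExampleScreenTexture);
//
// half4 ExampleSampleScreen(float4 positionCS) // positionCS is the SV_Position input
// {
//     float2 screenUV = GetNormalizedScreenSpaceUV(positionCS);
//     return SAMPLE_TEXTURE2D(_ExampleScreenTexture, sampler_ExampleScreenTexture, screenUV);
// }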
// Select uint4 component by index.
// Helper to improve codegen for 2d indexing (data[x][y])
// Replace:
//     data[i / 4][i % 4];
// with:
//     Select4(data[i / 4], i % 4);
uint Select4(uint4 v, uint i)
{
    // x = 0 = 00
    // y = 1 = 01
    // z = 2 = 10
    // w = 3 = 11
    uint mask0 = uint(int(i << 31) >> 31);
    uint mask1 = uint(int(i << 30) >> 31);
    return
        (((v.w & mask0) | (v.z & ~mask0)) & mask1) |
        (((v.y & mask0) | (v.x & ~mask0)) & ~mask1);
}

#if SHADER_TARGET < 45
uint URP_FirstBitLow(uint m)
{
    // http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightFloatCast
    return (asuint((float)(m & asuint(-asint(m)))) >> 23) - 0x7F;
}
#define FIRST_BIT_LOW URP_FirstBitLow
#else
#define FIRST_BIT_LOW firstbitlow
#endif
#define UnityStereoTransformScreenSpaceTex(uv) uv

// Returns the rendering layer mask of the mesh currently being rendered.
uint GetMeshRenderingLayer()
{
    return asuint(unity_RenderingLayer.x);
}

float EncodeMeshRenderingLayer(uint renderingLayer)
{
    // Force any bits above max to be skipped
    renderingLayer &= _RenderingLayerMaxInt;

    // This is a copy of "real PackInt(uint i, uint numBits)" from com.unity.render-pipelines.core\ShaderLibrary\Packing.hlsl
    // Differences of this copy:
    // - Pre-computed rcpMaxInt
    // - Returns float instead of real
    float rcpMaxInt = _RenderingLayerRcpMaxInt;
    return saturate(renderingLayer * rcpMaxInt);
}

uint DecodeMeshRenderingLayer(float renderingLayer)
{
    // This is a copy of "uint UnpackInt(real f, uint numBits)" from com.unity.render-pipelines.core\ShaderLibrary\Packing.hlsl
    // Differences of this copy:
    // - Pre-computed maxInt
    // - Parameter f is float instead of real
    uint maxInt = _RenderingLayerMaxInt;
    return (uint)(renderingLayer * maxInt + 0.5); // Round instead of truncating
}
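// Example usage (illustrative sketch, not part of the original URP source): in deferred style
// passes the rendering layer mask is packed into a normalized float render target channel when
// writing the GBuffer and unpacked when reading it back. The helper names are assumptions of
// the example.
//
// float ExampleWriteLayerToGBuffer()
// {
//     uint meshRenderingLayers = GetMeshRenderingLayer();
//     return EncodeMeshRenderingLayer(meshRenderingLayers); // store in a GBuffer channel
// }
//
// uint ExampleReadLayerFromGBuffer(float encodedLayer)
// {
//     return DecodeMeshRenderingLayer(encodedLayer);        // compare against a light's rendering layer mask
// }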
// TODO: implement
float GetCurrentExposureMultiplier()
{
    return 1;
}

#endif // UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED