// Copyright 2019 Google LLC
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// External include guard in highway.h - see comment there.

// For AVX3/AVX10 targets that support 512-bit vectors. Already includes
// base.h and shared-inl.h.
#include "hwy/ops/x86_512-inl.h"

// AVX3/AVX10 ops that have dependencies on ops defined in x86_512-inl.h if
// HWY_MAX_BYTES >= 64 is true are defined below.

// Avoid uninitialized warnings in GCC's avx512fintrin.h - see
// https://github.com/google/highway/issues/710
HWY_DIAGNOSTICS(push)
#if HWY_COMPILER_GCC_ACTUAL
HWY_DIAGNOSTICS_OFF(disable : 4700, ignored "-Wuninitialized")
HWY_DIAGNOSTICS_OFF(disable : 4701 4703 6001 26494,
                    ignored "-Wmaybe-uninitialized")
#endif

HWY_BEFORE_NAMESPACE();
namespace hwy {
namespace HWY_NAMESPACE {

#if HWY_TARGET <= HWY_AVX3_DL

// ------------------------------ ShiftLeft

// Generic for all vector lengths. Must be defined after all GaloisAffine.
template <int kBits, class V, HWY_IF_T_SIZE_V(V, 1)>
HWY_API V ShiftLeft(const V v) {
  const Repartition<uint64_t, DFromV<V>> du64;
  if (kBits == 0) return v;
  if (kBits == 1) return v + v;
  constexpr uint64_t kMatrix = (0x0102040810204080ULL >> kBits) &
                               (0x0101010101010101ULL * (0xFF >> kBits));
  return detail::GaloisAffine(v, Set(du64, kMatrix));
}

// ------------------------------ ShiftRight

// Generic for all vector lengths. Must be defined after all GaloisAffine.
template <int kBits, class V, HWY_IF_U8_D(DFromV<V>)>
HWY_API V ShiftRight(const V v) {
  const Repartition<uint64_t, DFromV<V>> du64;
  if (kBits == 0) return v;
  constexpr uint64_t kMatrix =
      (0x0102040810204080ULL << kBits) &
      (0x0101010101010101ULL * ((0xFF << kBits) & 0xFF));
  return detail::GaloisAffine(v, Set(du64, kMatrix));
}

// Generic for all vector lengths. Must be defined after all GaloisAffine.
template <int kBits, class V, HWY_IF_I8_D(DFromV<V>)>
HWY_API V ShiftRight(const V v) {
  const Repartition<uint64_t, DFromV<V>> du64;
  if (kBits == 0) return v;
  constexpr uint64_t kShift =
      (0x0102040810204080ULL << kBits) &
      (0x0101010101010101ULL * ((0xFF << kBits) & 0xFF));
  constexpr uint64_t kSign =
      kBits == 0 ? 0 : (0x8080808080808080ULL >> (64 - (8 * kBits)));
  return detail::GaloisAffine(v, Set(du64, kShift | kSign));
}

// ------------------------------ RotateRight

// U8 RotateRight is generic for all vector lengths on AVX3_DL.
template <int kBits, class V, HWY_IF_U8_D(DFromV<V>)>
HWY_API V RotateRight(V v) {
  static_assert(0 <= kBits && kBits < 8, "Invalid shift count");
  const Repartition<uint64_t, DFromV<V>> du64;
  if (kBits == 0) return v;
  constexpr uint64_t kShrMatrix =
      (0x0102040810204080ULL << kBits) &
      (0x0101010101010101ULL * ((0xFF << kBits) & 0xFF));
  constexpr int kShlBits = (-kBits) & 7;
  constexpr uint64_t kShlMatrix = (0x0102040810204080ULL >> kShlBits) &
                                  (0x0101010101010101ULL * (0xFF >> kShlBits));
  constexpr uint64_t kMatrix = kShrMatrix | kShlMatrix;
  return detail::GaloisAffine(v, Set(du64, kMatrix));
}

#endif  // HWY_TARGET <= HWY_AVX3_DL
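// Illustrative usage sketch (not part of the library): the ops above lower
// compile-time per-byte shifts and rotates to a single GF(2) affine
// transform (detail::GaloisAffine, i.e. GFNI's VGF2P8AFFINEQB) instead of
// widening to 16 bits. Assuming the conventional `hn` namespace alias:
//
//   namespace hn = hwy::HWY_NAMESPACE;
//   const hn::ScalableTag<uint8_t> d;
//   const auto v = hn::Iota(d, 0);           // 0, 1, 2, ...
//   const auto shl = hn::ShiftLeft<3>(v);    // each byte << 3
//   const auto shr = hn::ShiftRight<2>(v);   // each byte >> 2 (logical)
//   const auto rot = hn::RotateRight<1>(v);  // per-byte rotate right by 1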
// ------------------------------ Compress

#pragma push_macro("HWY_X86_SLOW_COMPRESS_STORE")
#ifndef HWY_X86_SLOW_COMPRESS_STORE  // allow override
// Slow on Zen4 and SPR, faster if we emulate via Compress().
#if HWY_TARGET == HWY_AVX3_ZEN4 || HWY_TARGET == HWY_AVX3_SPR
#define HWY_X86_SLOW_COMPRESS_STORE 1
#else
#define HWY_X86_SLOW_COMPRESS_STORE 0
#endif
#endif  // HWY_X86_SLOW_COMPRESS_STORE

// Always implement 8-bit here even if we lack VBMI2 because we can do better
// than generic_ops (8 at a time) via the native 32-bit compress (16 at a
// time).
#ifdef HWY_NATIVE_COMPRESS8
#undef HWY_NATIVE_COMPRESS8
#else
#define HWY_NATIVE_COMPRESS8
#endif

namespace detail {

#if HWY_TARGET <= HWY_AVX3_DL  // VBMI2

template <size_t N>
HWY_INLINE Vec128<uint8_t, N> NativeCompress(const Vec128<uint8_t, N> v,
                                             const Mask128<uint8_t, N> mask) {
  return Vec128<uint8_t, N>{_mm_maskz_compress_epi8(mask.raw, v.raw)};
}
HWY_INLINE Vec256<uint8_t> NativeCompress(const Vec256<uint8_t> v,
                                          const Mask256<uint8_t> mask) {
  return Vec256<uint8_t>{_mm256_maskz_compress_epi8(mask.raw, v.raw)};
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE Vec512<uint8_t> NativeCompress(const Vec512<uint8_t> v,
                                          const Mask512<uint8_t> mask) {
  return Vec512<uint8_t>{_mm512_maskz_compress_epi8(mask.raw, v.raw)};
}
#endif

template <size_t N>
HWY_INLINE Vec128<uint16_t, N> NativeCompress(const Vec128<uint16_t, N> v,
                                              const Mask128<uint16_t, N> mask) {
  return Vec128<uint16_t, N>{_mm_maskz_compress_epi16(mask.raw, v.raw)};
}
HWY_INLINE Vec256<uint16_t> NativeCompress(const Vec256<uint16_t> v,
                                           const Mask256<uint16_t> mask) {
  return Vec256<uint16_t>{_mm256_maskz_compress_epi16(mask.raw, v.raw)};
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE Vec512<uint16_t> NativeCompress(const Vec512<uint16_t> v,
                                           const Mask512<uint16_t> mask) {
  return Vec512<uint16_t>{_mm512_maskz_compress_epi16(mask.raw, v.raw)};
}
#endif

// Do not even define these to prevent accidental usage.
#if !HWY_X86_SLOW_COMPRESS_STORE

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<uint8_t, N> v,
                                    Mask128<uint8_t, N> mask,
                                    uint8_t* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_epi8(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<uint8_t> v, Mask256<uint8_t> mask,
                                    uint8_t* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_epi8(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<uint8_t> v, Mask512<uint8_t> mask,
                                    uint8_t* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_epi8(unaligned, mask.raw, v.raw);
}
#endif

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<uint16_t, N> v,
                                    Mask128<uint16_t, N> mask,
                                    uint16_t* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_epi16(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<uint16_t> v, Mask256<uint16_t> mask,
                                    uint16_t* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_epi16(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<uint16_t> v, Mask512<uint16_t> mask,
                                    uint16_t* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_epi16(unaligned, mask.raw, v.raw);
}
#endif  // HWY_MAX_BYTES >= 64

#endif  // HWY_X86_SLOW_COMPRESS_STORE
#endif  // HWY_TARGET <= HWY_AVX3_DL

template <size_t N>
HWY_INLINE Vec128<uint32_t, N> NativeCompress(Vec128<uint32_t, N> v,
                                              Mask128<uint32_t, N> mask) {
  return Vec128<uint32_t, N>{_mm_maskz_compress_epi32(mask.raw, v.raw)};
}
HWY_INLINE Vec256<uint32_t> NativeCompress(Vec256<uint32_t> v,
                                           Mask256<uint32_t> mask) {
  return Vec256<uint32_t>{_mm256_maskz_compress_epi32(mask.raw, v.raw)};
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE Vec512<uint32_t> NativeCompress(Vec512<uint32_t> v,
                                           Mask512<uint32_t> mask) {
  return Vec512<uint32_t>{_mm512_maskz_compress_epi32(mask.raw, v.raw)};
}
#endif
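// Reference semantics of the maskz compress intrinsics wrapped above, as a
// scalar sketch (illustration only; CompressScalar is a hypothetical helper,
// not library code): lanes whose mask bit is set are packed toward lane 0,
// and the remaining output lanes are zeroed ("maskz").
//
//   void CompressScalar(const uint32_t* in, uint64_t mask_bits, size_t n,
//                       uint32_t* out) {
//     size_t j = 0;
//     for (size_t i = 0; i < n; ++i) {
//       if ((mask_bits >> i) & 1) out[j++] = in[i];  // keep selected lanes
//     }
//     for (; j < n; ++j) out[j] = 0;  // maskz: zero the tail
//   }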
// We use table-based compress for 64-bit lanes, see CompressIsPartition.

// Do not even define these to prevent accidental usage.
#if !HWY_X86_SLOW_COMPRESS_STORE

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<uint32_t, N> v,
                                    Mask128<uint32_t, N> mask,
                                    uint32_t* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_epi32(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<uint32_t> v, Mask256<uint32_t> mask,
                                    uint32_t* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_epi32(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<uint32_t> v, Mask512<uint32_t> mask,
                                    uint32_t* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_epi32(unaligned, mask.raw, v.raw);
}
#endif

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<uint64_t, N> v,
                                    Mask128<uint64_t, N> mask,
                                    uint64_t* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_epi64(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<uint64_t> v, Mask256<uint64_t> mask,
                                    uint64_t* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_epi64(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<uint64_t> v, Mask512<uint64_t> mask,
                                    uint64_t* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_epi64(unaligned, mask.raw, v.raw);
}
#endif

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<float, N> v, Mask128<float, N> mask,
                                    float* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_ps(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<float> v, Mask256<float> mask,
                                    float* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_ps(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<float> v, Mask512<float> mask,
                                    float* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_ps(unaligned, mask.raw, v.raw);
}
#endif

template <size_t N>
HWY_INLINE void NativeCompressStore(Vec128<double, N> v,
                                    Mask128<double, N> mask,
                                    double* HWY_RESTRICT unaligned) {
  _mm_mask_compressstoreu_pd(unaligned, mask.raw, v.raw);
}
HWY_INLINE void NativeCompressStore(Vec256<double> v, Mask256<double> mask,
                                    double* HWY_RESTRICT unaligned) {
  _mm256_mask_compressstoreu_pd(unaligned, mask.raw, v.raw);
}
#if HWY_MAX_BYTES >= 64
HWY_INLINE void NativeCompressStore(Vec512<double> v, Mask512<double> mask,
                                    double* HWY_RESTRICT unaligned) {
  _mm512_mask_compressstoreu_pd(unaligned, mask.raw, v.raw);
}
#endif

#endif  // HWY_X86_SLOW_COMPRESS_STORE

// For u8x16 and <= u16x16 we can avoid store+load for Compress because there
// is only a single compressed vector (u32x16). Other EmuCompress are
// implemented after the EmuCompressStore they build upon.
template <class V, HWY_IF_U8_D(DFromV<V>),
          HWY_IF_LANES_LE_D(DFromV<V>, HWY_MAX_BYTES / 4)>
static HWY_INLINE HWY_MAYBE_UNUSED V EmuCompress(V v, MFromD<DFromV<V>> mask) {
  const DFromV<V> d;
  const Rebind<uint32_t, decltype(d)> d32;
  const VFromD<decltype(d32)> v0 = PromoteTo(d32, v);
  using M32 = MFromD<decltype(d32)>;
  const M32 m0 = PromoteMaskTo(d32, d, mask);
  return TruncateTo(d, Compress(v0, m0));
}

template <class V, HWY_IF_U16_D(DFromV<V>),
          HWY_IF_LANES_LE_D(DFromV<V>, HWY_MAX_BYTES / 4)>
static HWY_INLINE HWY_MAYBE_UNUSED V EmuCompress(V v, MFromD<DFromV<V>> mask) {
  const DFromV<V> d;
  const Rebind<int32_t, decltype(d)> di32;
  const RebindToUnsigned<decltype(di32)> du32;
  const MFromD<decltype(du32)> mask32 = PromoteMaskTo(du32, d, mask);
  // DemoteTo is 2 ops, but likely lower latency than TruncateTo on SKX.
  // Only i32 -> u16 is supported, whereas NativeCompress expects u32.
  const VFromD<decltype(du32)> v32 = PromoteTo(du32, v);
  return DemoteTo(d, BitCast(di32, NativeCompress(v32, mask32)));
}

// See above - small-vector EmuCompressStore are implemented via EmuCompress.
template <class D, HWY_IF_LANES_LE_D(D, HWY_MAX_BYTES / 4)>
static HWY_INLINE HWY_MAYBE_UNUSED void EmuCompressStore(
    VFromD<D> v, MFromD<D> mask, D d, TFromD<D>* HWY_RESTRICT unaligned) {
  StoreU(EmuCompress(v, mask), d, unaligned);
}
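// Worked example for the wide-vector emulation below (illustration only):
// for u8x64 with HWY_MAX_BYTES == 64, suppose the mask selects 13 lanes in
// the lower half and 20 in the upper half. EmuCompressStore writes the lower
// half's 13 bytes at offset 0 and the upper half's 20 bytes at offset 13
// (= CountTrue of the lower mask). Concatenating through memory sidesteps
// shifting a vector by a byte count that is only known at runtime.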
// Main emulation logic for wider vector, starting with EmuCompressStore
// because it is most convenient to merge pieces using memory (concatenating
// vectors at byte offsets is difficult).
template <class D, HWY_IF_LANES_GT_D(D, HWY_MAX_BYTES / 4)>
static HWY_INLINE HWY_MAYBE_UNUSED void EmuCompressStore(
    VFromD<D> v, MFromD<D> mask, D d, TFromD<D>* HWY_RESTRICT unaligned) {
  const Half<decltype(d)> dh;
  const MFromD<decltype(dh)> m0 = LowerHalfOfMask(dh, mask);
  const MFromD<decltype(dh)> m1 = UpperHalfOfMask(dh, mask);
  const VFromD<decltype(dh)> v0 = LowerHalf(dh, v);
  const VFromD<decltype(dh)> v1 = UpperHalf(dh, v);
  EmuCompressStore(v0, m0, dh, unaligned);
  EmuCompressStore(v1, m1, dh, unaligned + CountTrue(dh, m0));
}

// Finally, the remaining EmuCompress for wide vectors, using EmuCompressStore.
template <class V, HWY_IF_LANES_GT_D(DFromV<V>, HWY_MAX_BYTES / 4)>
static HWY_INLINE HWY_MAYBE_UNUSED V EmuCompress(V v, MFromD<DFromV<V>> mask) {
  using D = DFromV<V>;
  using T = TFromD<D>;
  const D d;
  alignas(HWY_MAX_LANES_D(D) * sizeof(T)) T buf[2 * HWY_MAX_LANES_D(D)];
  EmuCompressStore(v, mask, d, buf);
  return Load(d, buf);
}

}  // namespace detail

template <class V, class M, HWY_IF_T_SIZE_ONE_OF_V(V, (1 << 1) | (1 << 2))>
HWY_API V Compress(V v, const M mask) {
  const DFromV<V> d;
  const RebindToUnsigned<decltype(d)> du;
  const auto mu = RebindMask(du, mask);
#if HWY_TARGET <= HWY_AVX3_DL  // VBMI2
  return BitCast(d, detail::NativeCompress(BitCast(du, v), mu));
#else
  return BitCast(d, detail::EmuCompress(BitCast(du, v), mu));
#endif
}

template <class V, class M, HWY_IF_T_SIZE_V(V, 4)>
HWY_API V Compress(V v, const M mask) {
  const DFromV<V> d;
  const RebindToUnsigned<decltype(d)> du;
  const auto mu = RebindMask(du, mask);
  return BitCast(d, detail::NativeCompress(BitCast(du, v), mu));
}

// ------------------------------ CompressNot

template <class V, class M, HWY_IF_NOT_T_SIZE_V(V, 8)>
HWY_API V CompressNot(V v, const M mask) {
  return Compress(v, Not(mask));
}

// uint64_t lanes. Only implement for 256 and 512-bit vectors because this is
// a no-op for 128-bit.
template <class V, class M, HWY_IF_V_SIZE_GT_D(DFromV<V>, 16)>
HWY_API V CompressBlocksNot(V v, M mask) {
  return CompressNot(v, mask);
}

// ------------------------------ CompressBits

template <class V>
HWY_API V CompressBits(V v, const uint8_t* HWY_RESTRICT bits) {
  return Compress(v, LoadMaskBits(DFromV<V>(), bits));
}

// ------------------------------ CompressStore

// Generic for all vector lengths.
template <class D, HWY_IF_T_SIZE_ONE_OF_D(D, (1 << 1) | (1 << 2))>
HWY_API size_t CompressStore(VFromD<D> v, MFromD<D> mask, D d,
                             TFromD<D>* HWY_RESTRICT unaligned) {
#if HWY_X86_SLOW_COMPRESS_STORE
  StoreU(Compress(v, mask), d, unaligned);
#else
  const RebindToUnsigned<decltype(d)> du;
  const auto mu = RebindMask(du, mask);
  auto pu = reinterpret_cast<TFromD<decltype(du)> * HWY_RESTRICT>(unaligned);

#if HWY_TARGET <= HWY_AVX3_DL  // VBMI2
  detail::NativeCompressStore(BitCast(du, v), mu, pu);
#else
  detail::EmuCompressStore(BitCast(du, v), mu, du, pu);
#endif
#endif  // HWY_X86_SLOW_COMPRESS_STORE
  const size_t count = CountTrue(d, mask);
  detail::MaybeUnpoison(unaligned, count);
  return count;
}

template <class D, HWY_IF_NOT_FLOAT_D(D),
          HWY_IF_T_SIZE_ONE_OF_D(D, (1 << 4) | (1 << 8))>
HWY_API size_t CompressStore(VFromD<D> v, MFromD<D> mask, D d,
                             TFromD<D>* HWY_RESTRICT unaligned) {
#if HWY_X86_SLOW_COMPRESS_STORE
  StoreU(Compress(v, mask), d, unaligned);
#else
  const RebindToUnsigned<decltype(d)> du;
  const auto mu = RebindMask(du, mask);
  using TU = TFromD<decltype(du)>;
  TU* HWY_RESTRICT pu = reinterpret_cast<TU*>(unaligned);
  detail::NativeCompressStore(BitCast(du, v), mu, pu);
#endif  // HWY_X86_SLOW_COMPRESS_STORE
  const size_t count = CountTrue(d, mask);
  detail::MaybeUnpoison(unaligned, count);
  return count;
}
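// Usage sketch for the public Compress/CompressStore ops above (illustration
// only; assumes the conventional `hn` alias; the buffer is sized for a full
// vector because the emulated store path writes whole vectors):
//
//   namespace hn = hwy::HWY_NAMESPACE;
//   const hn::ScalableTag<uint32_t> d;
//   const auto v = hn::Iota(d, 0);            // 0, 1, 2, ...
//   const auto m = hn::Lt(v, hn::Set(d, 4));  // keep lanes with value < 4
//   HWY_ALIGN uint32_t out[hn::MaxLanes(d)];
//   const size_t count = hn::CompressStore(v, m, d, out);
//   // count == 4 here (given at least 4 lanes); out[0..3] = 0, 1, 2, 3.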
// Additional overloads to avoid casting to uint32_t (delay?).
template <class D, HWY_IF_FLOAT3264_D(D)>
HWY_API size_t CompressStore(VFromD<D> v, MFromD<D> mask, D d,
                             TFromD<D>* HWY_RESTRICT unaligned) {
#if HWY_X86_SLOW_COMPRESS_STORE
  StoreU(Compress(v, mask), d, unaligned);
#else
  (void)d;
  detail::NativeCompressStore(v, mask, unaligned);
#endif  // HWY_X86_SLOW_COMPRESS_STORE
  const size_t count = PopCount(uint64_t{mask.raw});
  detail::MaybeUnpoison(unaligned, count);
  return count;
}

// ------------------------------ CompressBlendedStore

template <class D>
HWY_API size_t CompressBlendedStore(VFromD<D> v, MFromD<D> m, D d,
                                    TFromD<D>* HWY_RESTRICT unaligned) {
  // Native CompressStore already does the blending at no extra cost (latency
  // 11, rthroughput 2 - same as compress plus store).
  HWY_IF_CONSTEXPR(HWY_MAX_LANES_D(D) < (16 / sizeof(TFromD<D>))) {
    m = And(m, FirstN(d, HWY_MAX_LANES_D(D)));
  }
  HWY_IF_CONSTEXPR(!HWY_X86_SLOW_COMPRESS_STORE &&
                   (HWY_TARGET <= HWY_AVX3_DL || sizeof(TFromD<D>) > 2)) {
    return CompressStore(v, m, d, unaligned);
  }
  else {
    const size_t count = CountTrue(d, m);
    StoreN(Compress(v, m), d, unaligned, count);
    detail::MaybeUnpoison(unaligned, count);
    return count;
  }
}

// ------------------------------ CompressBitsStore

// Generic for all vector lengths.
template <class D>
HWY_API size_t CompressBitsStore(VFromD<D> v, const uint8_t* HWY_RESTRICT bits,
                                 D d, TFromD<D>* HWY_RESTRICT unaligned) {
  return CompressStore(v, LoadMaskBits(d, bits), d, unaligned);
}

#pragma pop_macro("HWY_X86_SLOW_COMPRESS_STORE")

// NOLINTNEXTLINE(google-readability-namespace-comments)
}  // namespace HWY_NAMESPACE
}  // namespace hwy
HWY_AFTER_NAMESPACE();

// Note that the GCC warnings are not suppressed if we only wrap the
// *intrin.h - the warning seems to be issued at the call site of intrinsics,
// i.e. our code.
HWY_DIAGNOSTICS(pop)