"""EGMS data downloader — downloads all EGMS L2b tiles from egms_data.txt. Organises downloads into folders by track number and time period. Handles retries, resume on restart, and validates downloads. Output structure on E:/AllStrata/egms/: calibrated_2018_2022/ track_001/ EGMS_L2b_001_0294_IW3_VV_2018_2022_1.zip track_030/ ... calibrated_2019_2023/ track_001/ ... """ import os import re import sys import time import hashlib import requests from pathlib import Path from datetime import datetime EGMS_DIR = Path(r"E:\AllStrata\egms") LINKS_FILE = Path(r"C:\Users\Administrator\Documents\AllStrata\egms_data.txt") LOG_FILE = EGMS_DIR / "download.log" FAILED_FILE = EGMS_DIR / "failed_downloads.txt" MAX_RETRIES = 5 RETRY_DELAY_BASE = 10 # seconds, doubles each retry TIMEOUT = 120 # seconds per download MIN_FILE_SIZE = 1000 # bytes — anything smaller is likely an error page def parse_filename(url: str) -> dict: """Extract metadata from an EGMS download URL.""" # URL: .../EGMS_L2b_001_0294_IW3_VV_2018_2022_1.zip?id=... match = re.search(r"(EGMS_L2b_(\d{3})_(\d{4})_(IW\d)_VV_(\d{4})_(\d{4})_\d+\.zip)", url) if not match: return None return { "filename": match.group(1), "track": match.group(2), "burst": match.group(3), "subswath": match.group(4), "year_start": match.group(5), "year_end": match.group(6), "period": f"{match.group(5)}_{match.group(6)}", } def get_output_path(meta: dict) -> Path: """Determine the output path for a file based on its metadata.""" period_dir = f"calibrated_{meta['period']}" track_dir = f"track_{meta['track']}" return EGMS_DIR / period_dir / track_dir / meta["filename"] def log(msg: str): """Write to both stdout and log file.""" timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") line = f"[{timestamp}] {msg}" print(line, flush=True) with open(LOG_FILE, "a", encoding="utf-8") as f: f.write(line + "\n") def download_file(url: str, output_path: Path) -> bool: """Download a single file with retries and validation. Returns True if download succeeded, False if all retries failed. 
""" output_path.parent.mkdir(parents=True, exist_ok=True) # Skip if already downloaded and valid if output_path.exists() and output_path.stat().st_size > MIN_FILE_SIZE: # Verify it's actually a ZIP (starts with PK) with open(output_path, "rb") as f: magic = f.read(2) if magic == b"PK": return True # Already have a valid download for attempt in range(1, MAX_RETRIES + 1): try: resp = requests.get(url, timeout=TIMEOUT, stream=True) if resp.status_code != 200: log(f" Attempt {attempt}/{MAX_RETRIES}: HTTP {resp.status_code}") time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) continue # Stream to temp file first, then rename (atomic) tmp_path = output_path.with_suffix(".tmp") total_size = 0 with open(tmp_path, "wb") as f: for chunk in resp.iter_content(chunk_size=1024 * 1024): f.write(chunk) total_size += len(chunk) # Validate if total_size < MIN_FILE_SIZE: log(f" Attempt {attempt}/{MAX_RETRIES}: Too small ({total_size} bytes)") tmp_path.unlink(missing_ok=True) time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) continue # Check ZIP magic bytes with open(tmp_path, "rb") as f: magic = f.read(2) if magic != b"PK": log(f" Attempt {attempt}/{MAX_RETRIES}: Not a valid ZIP") tmp_path.unlink(missing_ok=True) time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) continue # Success — rename temp to final tmp_path.rename(output_path) return True except requests.Timeout: log(f" Attempt {attempt}/{MAX_RETRIES}: Timeout") time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) except requests.ConnectionError as e: log(f" Attempt {attempt}/{MAX_RETRIES}: Connection error: {e}") time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) except Exception as e: log(f" Attempt {attempt}/{MAX_RETRIES}: Unexpected error: {e}") time.sleep(RETRY_DELAY_BASE * (2 ** (attempt - 1))) return False def main(): # Read all URLs urls = [line.strip() for line in LINKS_FILE.read_text().splitlines() if line.strip()] total = len(urls) log(f"EGMS Downloader starting: {total} files to process") log(f"Output: {EGMS_DIR}") # Parse all URLs and plan downloads downloads = [] skipped_parse = 0 for url in urls: meta = parse_filename(url) if meta is None: log(f"WARNING: Could not parse URL: {url}") skipped_parse += 1 continue output_path = get_output_path(meta) downloads.append((url, output_path, meta)) if skipped_parse: log(f"WARNING: {skipped_parse} URLs could not be parsed") # Count already downloaded already_done = sum(1 for _, p, _ in downloads if p.exists() and p.stat().st_size > MIN_FILE_SIZE) remaining = total - already_done log(f"Already downloaded: {already_done}, Remaining: {remaining}") # Download succeeded = already_done failed = [] start_time = time.time() for i, (url, output_path, meta) in enumerate(downloads): # Skip if already have it if output_path.exists() and output_path.stat().st_size > MIN_FILE_SIZE: with open(output_path, "rb") as f: if f.read(2) == b"PK": continue progress = f"[{i+1}/{total}]" log(f"{progress} Downloading {meta['filename']} (track {meta['track']}, {meta['period']})") ok = download_file(url, output_path) if ok: size_mb = output_path.stat().st_size / (1024 * 1024) log(f"{progress} OK — {size_mb:.1f} MB") succeeded += 1 else: log(f"{progress} FAILED after {MAX_RETRIES} retries") failed.append(url) # Progress stats every 50 files if (i + 1) % 50 == 0: elapsed = time.time() - start_time rate = (i + 1 - already_done) / max(elapsed, 1) eta_s = (total - i - 1) / max(rate, 0.001) log(f" --- Progress: {succeeded}/{total} done, {len(failed)} failed, " f"ETA: {eta_s/3600:.1f}h ---") # Write failed URLs for retry if 

def main():
    # Read all URLs
    urls = [line.strip() for line in LINKS_FILE.read_text().splitlines() if line.strip()]
    total = len(urls)
    log(f"EGMS Downloader starting: {total} files to process")
    log(f"Output: {EGMS_DIR}")

    # Parse all URLs and plan downloads
    downloads = []
    skipped_parse = 0
    for url in urls:
        meta = parse_filename(url)
        if meta is None:
            log(f"WARNING: Could not parse URL: {url}")
            skipped_parse += 1
            continue
        output_path = get_output_path(meta)
        downloads.append((url, output_path, meta))

    if skipped_parse:
        log(f"WARNING: {skipped_parse} URLs could not be parsed")

    # Count already downloaded
    already_done = sum(
        1 for _, p, _ in downloads
        if p.exists() and p.stat().st_size > MIN_FILE_SIZE
    )
    remaining = total - already_done
    log(f"Already downloaded: {already_done}, Remaining: {remaining}")

    # Download
    succeeded = already_done
    failed = []
    start_time = time.time()

    for i, (url, output_path, meta) in enumerate(downloads):
        # Skip if we already have a valid copy
        if output_path.exists() and output_path.stat().st_size > MIN_FILE_SIZE:
            with open(output_path, "rb") as f:
                if f.read(2) == b"PK":
                    continue

        progress = f"[{i+1}/{total}]"
        log(f"{progress} Downloading {meta['filename']} (track {meta['track']}, {meta['period']})")

        ok = download_file(url, output_path)
        if ok:
            size_mb = output_path.stat().st_size / (1024 * 1024)
            log(f"{progress} OK: {size_mb:.1f} MB")
            succeeded += 1
        else:
            log(f"{progress} FAILED after {MAX_RETRIES} retries")
            failed.append(url)

        # Progress stats every 50 files
        if (i + 1) % 50 == 0:
            elapsed = time.time() - start_time
            rate = (i + 1 - already_done) / max(elapsed, 1)
            eta_s = (total - i - 1) / max(rate, 0.001)
            log(f"  --- Progress: {succeeded}/{total} done, {len(failed)} failed, "
                f"ETA: {eta_s/3600:.1f}h ---")

    # Write failed URLs for retry
    if failed:
        with open(FAILED_FILE, "w") as f:
            for url in failed:
                f.write(url + "\n")
        log(f"FAILED: {len(failed)} downloads. URLs saved to {FAILED_FILE}")

    # Summary
    elapsed = time.time() - start_time
    log("")
    log("========== DOWNLOAD COMPLETE ==========")
    log(f"Total files: {total}")
    log(f"Succeeded: {succeeded}")
    log(f"Failed: {len(failed)}")
    log(f"Time: {elapsed/3600:.1f} hours")
    log("")

    # Disk usage per period
    for period_dir in sorted(EGMS_DIR.iterdir()):
        if period_dir.is_dir() and period_dir.name.startswith("calibrated_"):
            size = sum(f.stat().st_size for f in period_dir.rglob("*") if f.is_file())
            n_files = sum(1 for _ in period_dir.rglob("*.zip"))
            log(f"  {period_dir.name}: {n_files} files, {size/1024/1024/1024:.1f} GB")

    return 0 if not failed else 1


if __name__ == "__main__":
    sys.exit(main())
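
# Retrying failures (a sketch, not part of the script's own flow): URLs in
# failed_downloads.txt use the same one-URL-per-line format as egms_data.txt,
# so a follow-up pass can reuse main() by repointing LINKS_FILE first. The
# module name "egms_downloader" below is hypothetical:
#
#   import egms_downloader as dl
#   dl.LINKS_FILE = dl.FAILED_FILE
#   dl.main()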