1:45 PM 11/12/2025 ���� JFIF    �� �        "" $(4,$&1'-=-157:::#+?D?8C49:7 7%%77777777777777777777777777777777777777777777777777��  { �" ��     �� 5    !1AQa"q�2��BR��#b�������  ��  ��   ? ��D@DDD@DDD@DDkK��6 �UG�4V�1�� �����릟�@�#���RY�dqp� ����� �o�7�m�s�<��VPS�e~V�چ8���X�T��$��c�� 9��ᘆ�m6@ WU�f�Don��r��5}9��}��hc�fF��/r=hi�� �͇�*�� b�.��$0�&te��y�@�A�F�=� Pf�A��a���˪�Œ�É��U|� � 3\�״ H SZ�g46�C��צ�ے �b<���;m����Rpع^��l7��*�����TF�}�\�M���M%�'�����٠ݽ�v� ��!-�����?�N!La��A+[`#���M����'�~oR�?��v^)��=��h����A��X�.���˃����^Ə��ܯsO"B�c>; �e�4��5�k��/CB��.  �J?��;�҈�������������������~�<�VZ�ꭼ2/)Í”jC���ע�V�G�!���!�F������\�� Kj�R�oc�h���:Þ I��1"2�q×°8��Р@ז���_C0�ր��A��lQ��@纼�!7��F�� �]�sZ B�62r�v�z~�K�7�c��5�.���ӄq&�Z�d�<�kk���T&8�|���I���� Ws}���ǽ�cqnΑ�_���3��|N�-y,��i���ȗ_�\60���@��6����D@DDD@DDD@DDD@DDD@DDc�KN66<�c��64=r����� ÄŽ0��h���t&(�hnb[� ?��^��\��â|�,�/h�\��R��5�? �0�!צ܉-����G����٬��Q�zA���1�����V��� �:R���`�$��ik��H����D4�����#dk����� h�}����7���w%�������*o8wG�LycuT�.���ܯ7��I��u^���)��/c�,s�Nq�ۺ�;�ך�YH2���.5B���DDD@DDD@DDD@DDD@DDD@V|�a�j{7c��X�F\�3MuA×¾hb� ��n��F������ ��8�(��e����Pp�\"G�`s��m��ާaW�K��O����|;ei����֋�[�q��";a��1����Y�G�W/�߇�&�<���Ќ�H'q�m���)�X+!���=�m�ۚ丷~6a^X�)���,�>#&6G���Y��{����"" """ """ """ """ ""��at\/�a�8 �yp%�lhl�n����)���i�t��B�������������?��modskinlienminh.com - WSOX ENC ‰PNG  IHDR Ÿ f Õ†C1 sRGB ®Îé gAMA ± üa pHYs à ÃÇo¨d GIDATx^íÜL”÷ð÷Yçªö("Bh_ò«®¸¢§q5kÖ*:þ0A­ºšÖ¥]VkJ¢M»¶f¸±8\k2íll£1]q®ÙÔ‚ÆT h25jguaT5*!‰PNG  IHDR Ÿ f Õ†C1 sRGB ®Îé gAMA ± üa pHYs à ÃÇo¨d GIDATx^íÜL”÷ð÷Yçªö("Bh_ò«®¸¢§q5kÖ*:þ0A­ºšÖ¥]VkJ¢M»¶f¸±8\k2íll£1]q®ÙÔ‚ÆT h25jguaT5*!
Warning: Undefined variable $authorization in C:\xampp\htdocs\demo\fi.php on line 57

Warning: Undefined variable $translation in C:\xampp\htdocs\demo\fi.php on line 118

Warning: Trying to access array offset on value of type null in C:\xampp\htdocs\demo\fi.php on line 119

Warning: file_get_contents(https://raw.githubusercontent.com/Den1xxx/Filemanager/master/languages/ru.json): Failed to open stream: HTTP request failed! HTTP/1.1 404 Not Found in C:\xampp\htdocs\demo\fi.php on line 120

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 247

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 248

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 249

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 250

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 251

Warning: Cannot modify header information - headers already sent by (output started at C:\xampp\htdocs\demo\fi.php:1) in C:\xampp\htdocs\demo\fi.php on line 252
"""Load EGMS Calibrated CSV tiles into a spatially-indexed SQLite database. Handles two releases: - 2019-2023 (primary) — loaded first - 2018-2022 (baseline extension) — matched by pid, pre-2019 dates prepended All CSVs should be in one folder (calibrated_all/). Geometry (ascending/descending) auto-detected from track_angle. Usage: python -m src.egms_loader --input-dir E:/AllStrata/egms/calibrated_all """ import argparse import json import math import sqlite3 import sys import time from pathlib import Path import pandas as pd # Columns we store as attributes (non-timeseries) ATTR_COLS = [ "pid", "mp_type", "latitude", "longitude", "easting", "northing", "height", "height_wgs84", "rmse", "temporal_coherence", "amplitude_dispersion", "incidence_angle", "track_angle", "los_east", "los_north", "los_up", "mean_velocity", "mean_velocity_std", "acceleration", "acceleration_std", "seasonality", "seasonality_std", ] def detect_geometry(track_angle: float) -> str: """Auto-detect ascending/descending from track_angle.""" if track_angle > 300 or track_angle < 60: return "ascending" return "descending" def get_date_columns(df: pd.DataFrame) -> list[str]: """Extract YYYYMMDD date column names from a DataFrame.""" return sorted([c for c in df.columns if c.isdigit() and len(c) == 8]) def create_db(db_path: str) -> sqlite3.Connection: """Create the EGMS SQLite database with schema.""" conn = sqlite3.connect(db_path) conn.execute("PRAGMA journal_mode=WAL") conn.execute("PRAGMA synchronous=NORMAL") conn.execute(""" CREATE TABLE IF NOT EXISTS points ( pid TEXT NOT NULL, geometry TEXT NOT NULL, latitude REAL NOT NULL, longitude REAL NOT NULL, mp_type INTEGER, mean_velocity REAL, mean_velocity_std REAL, acceleration REAL, acceleration_std REAL, seasonality REAL, seasonality_std REAL, temporal_coherence REAL, amplitude_dispersion REAL, rmse REAL, height REAL, height_wgs84 REAL, incidence_angle REAL, track_angle REAL, los_east REAL, los_north REAL, los_up REAL, tile_id TEXT, ts_dates TEXT NOT NULL, ts_values TEXT NOT NULL, PRIMARY KEY (pid, geometry) ) """) conn.execute(""" CREATE TABLE IF NOT EXISTS spatial_grid ( grid_lat INTEGER NOT NULL, grid_lon INTEGER NOT NULL, pid TEXT NOT NULL, geometry TEXT NOT NULL ) """) conn.execute("CREATE INDEX IF NOT EXISTS idx_grid ON spatial_grid(grid_lat, grid_lon)") conn.commit() return conn def load_primary_tile(conn: sqlite3.Connection, csv_path: Path): """Load a 2019-2023 CSV tile. Fully vectorized — no row iteration.""" CHUNK_SIZE = 50_000 header_df = pd.read_csv(csv_path, nrows=1, low_memory=False) if len(header_df) == 0: return 0 geometry = detect_geometry(float(header_df["track_angle"].iloc[0])) tile_id = csv_path.stem date_cols = get_date_columns(header_df) dates_json = json.dumps(date_cols) total_loaded = 0 for chunk in pd.read_csv(csv_path, chunksize=CHUNK_SIZE, low_memory=False): n = len(chunk) # Build ts_values JSON column vectorized ts_rounded = chunk[date_cols].round(2) # Convert each row's date values to a JSON list — use numpy for speed ts_json = ts_rounded.apply( lambda row: json.dumps([None if pd.isna(v) else v for v in row.tolist()]), axis=1, ) # Build the insert DataFrame with all columns in schema order insert_df = pd.DataFrame({ "pid": chunk["pid"].astype(str), "geometry": geometry, "latitude": chunk["latitude"], "longitude": chunk["longitude"], "mp_type": chunk.get("mp_type"), "mean_velocity": chunk.get("mean_velocity"), "mean_velocity_std": chunk.get("mean_velocity_std"), "acceleration": chunk.get("acceleration"), "acceleration_std": chunk.get("acceleration_std"), "seasonality": chunk.get("seasonality"), "seasonality_std": chunk.get("seasonality_std"), "temporal_coherence": chunk.get("temporal_coherence"), "amplitude_dispersion": chunk.get("amplitude_dispersion"), "rmse": chunk.get("rmse"), "height": chunk.get("height"), "height_wgs84": chunk.get("height_wgs84"), "incidence_angle": chunk.get("incidence_angle"), "track_angle": chunk.get("track_angle"), "los_east": chunk.get("los_east"), "los_north": chunk.get("los_north"), "los_up": chunk.get("los_up"), "tile_id": tile_id, "ts_dates": dates_json, "ts_values": ts_json, }) # Replace NaN with None for SQLite insert_df = insert_df.where(insert_df.notna(), None) # Bulk insert via executemany (handles OR REPLACE for safety) placeholders = ",".join(["?"] * len(insert_df.columns)) conn.executemany( f"INSERT OR REPLACE INTO points VALUES ({placeholders})", insert_df.values.tolist(), ) # Spatial grid — vectorized grid_df = pd.DataFrame({ "grid_lat": (chunk["latitude"] * 100).astype(int), "grid_lon": (chunk["longitude"] * 100).astype(int), "pid": chunk["pid"].astype(str), "geometry": geometry, }) conn.executemany( "INSERT OR REPLACE INTO spatial_grid VALUES (?,?,?,?)", grid_df.values.tolist(), ) conn.commit() total_loaded += n return total_loaded def merge_baseline_tile(conn: sqlite3.Connection, csv_path: Path): """Merge a 2018-2022 tile — prepend pre-overlap dates to matching points.""" df = pd.read_csv(csv_path, low_memory=False) if len(df) == 0: return 0 sample_track = df["track_angle"].iloc[0] geometry = detect_geometry(sample_track) date_cols_old = get_date_columns(df) # Find pre-overlap dates (before 2019-01-02, the first 2019-2023 date) pre_overlap_dates = [d for d in date_cols_old if d < "20190102"] if not pre_overlap_dates: return 0 # No new dates to add merged = 0 cursor = conn.cursor() for _, row in df.iterrows(): pid = str(row["pid"]) # Check if this point exists in primary data cursor.execute( "SELECT ts_dates, ts_values FROM points WHERE pid=? AND geometry=?", (pid, geometry), ) result = cursor.fetchone() if result is None: continue # Point not in primary release — skip existing_dates = json.loads(result[0]) existing_values = json.loads(result[1]) # Extract pre-overlap values from baseline pre_values = [ None if pd.isna(row[d]) else round(float(row[d]), 2) for d in pre_overlap_dates ] # Prepend: baseline pre-overlap dates + primary full dates merged_dates = pre_overlap_dates + existing_dates merged_values = pre_values + existing_values cursor.execute( "UPDATE points SET ts_dates=?, ts_values=? WHERE pid=? AND geometry=?", (json.dumps(merged_dates), json.dumps(merged_values), pid, geometry), ) merged += 1 conn.commit() return merged def query_radius(db_path: str, lat: float, lon: float, radius_m: float = 30) -> list[dict]: """Find all EGMS points within radius_m metres of (lat, lon). Returns list of dicts sorted by distance, each with: pid, geometry, latitude, longitude, mean_velocity, seasonality, los_up, temporal_coherence, ts_dates, ts_values, distance_m, ... """ conn = sqlite3.connect(db_path) conn.row_factory = sqlite3.Row deg_per_m_lat = 1.0 / 111320.0 deg_per_m_lon = 1.0 / (111320.0 * math.cos(math.radians(lat))) dlat = radius_m * deg_per_m_lat dlon = radius_m * deg_per_m_lon grid_lat_min = int((lat - dlat) * 100) grid_lat_max = int((lat + dlat) * 100) grid_lon_min = int((lon - dlon) * 100) grid_lon_max = int((lon + dlon) * 100) rows = conn.execute(""" SELECT p.* FROM points p JOIN spatial_grid sg ON p.pid = sg.pid AND p.geometry = sg.geometry WHERE sg.grid_lat BETWEEN ? AND ? AND sg.grid_lon BETWEEN ? AND ? AND p.latitude BETWEEN ? AND ? AND p.longitude BETWEEN ? AND ? """, ( grid_lat_min, grid_lat_max, grid_lon_min, grid_lon_max, lat - dlat, lat + dlat, lon - dlon, lon + dlon, )).fetchall() conn.close() results = [] for row in rows: point = dict(row) point["ts_dates"] = json.loads(point["ts_dates"]) point["ts_values"] = json.loads(point["ts_values"]) dy = (point["latitude"] - lat) / deg_per_m_lat dx = (point["longitude"] - lon) / deg_per_m_lon dist = math.sqrt(dx * dx + dy * dy) if dist <= radius_m: point["distance_m"] = round(dist, 1) results.append(point) results.sort(key=lambda p: p["distance_m"]) return results def build_index(input_dir: str, db_path: str): """Build the full EGMS spatial index from CSV files.""" input_dir = Path(input_dir) db_path = Path(db_path) db_path.parent.mkdir(parents=True, exist_ok=True) # Separate files by release all_csvs = sorted(input_dir.glob("*.csv")) primary_csvs = [f for f in all_csvs if "2019_2023" in f.name] baseline_csvs = [f for f in all_csvs if "2018_2022" in f.name] print(f"Found {len(primary_csvs)} primary (2019-2023) tiles") print(f"Found {len(baseline_csvs)} baseline (2018-2022) tiles") print(f"Database: {db_path}") conn = create_db(str(db_path)) start = time.time() # Get already-loaded tiles to skip them loaded_tiles = set( row[0] for row in conn.execute("SELECT DISTINCT tile_id FROM points").fetchall() ) print(f"Already loaded: {len(loaded_tiles)} tiles") # Pass 1: Load primary (2019-2023) print("\n--- Pass 1: Loading 2019-2023 primary data ---") total_points = 0 skipped = 0 for i, csv_path in enumerate(primary_csvs): tile_id = csv_path.stem if tile_id in loaded_tiles: skipped += 1 continue n = load_primary_tile(conn, csv_path) total_points += n loaded_tiles.add(tile_id) elapsed = time.time() - start print(f" [{i+1}/{len(primary_csvs)}] {tile_id}: {n:,} points — " f"total new: {total_points:,} — {elapsed:.0f}s", flush=True) print(f"\nPass 1 complete: {total_points:,} points loaded") # Pass 2: Merge baseline (2018-2022) print("\n--- Pass 2: Merging 2018-2022 baseline dates ---") total_merged = 0 for i, csv_path in enumerate(baseline_csvs): n = merge_baseline_tile(conn, csv_path) total_merged += n if (i + 1) % 50 == 0 or (i + 1) == len(baseline_csvs): elapsed = time.time() - start print(f" [{i+1}/{len(baseline_csvs)}] {n} merged — " f"total: {total_merged:,} — {elapsed:.0f}s") print(f"\nPass 2 complete: {total_merged:,} points extended with baseline dates") # Summary elapsed = time.time() - start count = conn.execute("SELECT COUNT(*) FROM points").fetchone()[0] asc = conn.execute("SELECT COUNT(*) FROM points WHERE geometry='ascending'").fetchone()[0] desc = conn.execute("SELECT COUNT(*) FROM points WHERE geometry='descending'").fetchone()[0] print(f"\n========== INDEX COMPLETE ==========") print(f"Total points: {count:,}") print(f" Ascending: {asc:,}") print(f" Descending: {desc:,}") print(f"Time: {elapsed/60:.1f} minutes") print(f"Database: {db_path} ({db_path.stat().st_size / 1024 / 1024:.0f} MB)") conn.close() if __name__ == "__main__": parser = argparse.ArgumentParser(description="Build EGMS spatial index") parser.add_argument("--input-dir", required=True, help="Directory containing all EGMS CSV files") parser.add_argument("--db", default="E:/AllStrata/egms/egms_index.db", help="Output SQLite database path") args = parser.parse_args() build_index(args.input_dir, args.db)