rlogh commited on
Commit
f22c741
·
verified ·
1 Parent(s): 2852fb9

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +4 -3
  2. app.py +618 -0
  3. gitattributes +35 -0
  4. requirements.txt +11 -0
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: Test
3
- emoji: 🚀
4
  colorFrom: yellow
5
- colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.49.1
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Newproto
3
+ emoji: 🐢
4
  colorFrom: yellow
5
+ colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 5.49.1
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,618 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled0.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1sAnaOUZv4qGku0J47sCP7XvSQnMFsTCL
8
+ """
9
+
10
+ # -*- coding: utf-8 -*-
11
+ """updated_prototype.ipynb
12
+
13
+ Automatically generated by Colab.
14
+
15
+ Original file is located at
16
+ https://colab.research.google.com/drive/1qhzqPF3RjCwAc1pOzOsyDpwFQkm8nadC
17
+ """
18
+
19
+ # !pip install autogluon.multimodal
20
+
21
+ """
22
+ Lanternfly Field Capture Space - Modular Deployment (V11)
23
+
24
+ This version integrates the image classification model (using AutoGluon)
25
+ into a multi-cell Colab deployment structure. All GPS and Data Saving
26
+ functionality remains disabled as placeholders.
27
+ """
28
+
29
+ # ==============================================================================
30
+ # CELL 1: SETUP AND IMPORTS
31
+ # ==============================================================================
32
+
33
+ # Install necessary library (Autogluon)
34
+ # NOTE: If running in Colab, uncomment the line below:
35
+ # !pip install autogluon.multimodal --quiet
36
+
37
+ import gradio as gr
38
+ import os
39
+ import json
40
+ import uuid
41
+ import shutil
42
+ import zipfile
43
+ import pathlib
44
+ import tempfile
45
+ import pandas
46
+ import PIL.Image
47
+ from datetime import datetime
48
+
49
+ # NOTE: Since image_model uses these, we bring them back for the model integration
50
+ import huggingface_hub
51
+ import autogluon.multimodal
52
+
53
+ # --- Core App Configuration (Placeholder) ---
54
+ HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HF_TOKEN_SPACE")
55
+ DATASET_REPO = os.getenv("DATASET_REPO", "rlogh/lanternfly-data")
56
+
57
+ # --- Utility Functions (Active) ---
58
+
59
def get_current_time():
    """Return the current local time as an ISO-8601 string."""
    return datetime.now().isoformat()

def handle_time_capture():
    """Capture the current time for the UI.

    Returns:
        tuple[str, str]: (markdown status line, ISO-8601 timestamp).
    """
    captured = get_current_time()
    return f"🕐 **Time Captured**: {captured}", captured
68
+
69
+ # --- Placeholder Stubs ---
70
+
71
+ # def _append_jsonl_in_repo(...): pass
72
+ # def _save_image_to_repo(...): pass
73
+ # def handle_gps_location(...): pass
74
+
75
def handle_gps_location(json_str):
    """Parse the GPS JSON pushed from the browser and fan out textbox values.

    Args:
        json_str: JSON payload written by the geolocation JavaScript; either
            ``{"error": ...}`` or ``{"latitude", "longitude", "accuracy",
            "timestamp"}``.

    Returns:
        tuple: ``(status_markdown, latitude, longitude, accuracy, timestamp)``
        as strings. On error the latitude slot carries the error text and the
        remaining fields are empty — matching the five bound output textboxes.
    """
    try:
        data = json.loads(json_str)
        if 'error' in data:
            status_msg = f"❌ **GPS Error**: {data['error']}"
            return status_msg, data['error'], "", "", ""

        lat = str(data.get('latitude', ''))
        lon = str(data.get('longitude', ''))
        accuracy = str(data.get('accuracy', ''))
        timestamp = data.get('timestamp', '')

        # The current JS sends an ISO string, but accept epoch milliseconds
        # (older payloads) and normalize them to ISO format.
        # FIX: use the module-level `datetime` import instead of re-importing
        # it locally on every call.
        if timestamp and isinstance(timestamp, (int, float)):
            timestamp = datetime.fromtimestamp(timestamp / 1000).isoformat()

        # Truncate coordinates to 8 chars to keep the status line compact.
        status_msg = f"✅ **GPS Captured**: {lat[:8]}, {lon[:8]} (accuracy: {accuracy}m)"
        return status_msg, lat, lon, accuracy, timestamp

    except Exception as e:
        status_msg = f"❌ **Error**: {str(e)}"
        return status_msg, f"Error parsing GPS data: {str(e)}", "", "", ""
99
+
100
def get_gps_js():
    """Return the browser-side JavaScript that captures geolocation.

    The snippet locates the hidden Gradio textbox (#hidden_gps_input),
    writes a JSON payload into it (coordinates on success, {"error": ...}
    on failure or missing geolocation support), and fires an 'input' event
    so Gradio propagates the new value to handle_gps_location.
    """
    return """
    () => {
        // find the textarea element inside Gradio textbox by its elem_id
        const textarea = document.querySelector('#hidden_gps_input textarea');
        if (!textarea) {
            console.log("Hidden GPS textbox not found");
            return;
        }
        if (!navigator.geolocation) {
            textarea.value = JSON.stringify({error: "Geolocation not supported"});
            textarea.dispatchEvent(new Event('input', { bubbles: true }));
            return;
        }
        navigator.geolocation.getCurrentPosition(
            function(position) {
                const data = {
                    latitude: position.coords.latitude,
                    longitude: position.coords.longitude,
                    accuracy: position.coords.accuracy,
                    timestamp: new Date().toISOString()
                };
                textarea.value = JSON.stringify(data);
                // dispatch 'input' event so Gradio notices the change
                textarea.dispatchEvent(new Event('input', { bubbles: true }));
            },
            function(err) {
                textarea.value = JSON.stringify({ error: err.message });
                textarea.dispatchEvent(new Event('input', { bubbles: true }));
            },
            { enableHighAccuracy: true, timeout: 10000 }
        );
    }
    """
135
+
136
def save_to_dataset(image, lat, lon, accuracy_m, device_ts):
    """Test-mode stand-in for the real dataset save.

    Returns a (status markdown, JSON preview string) pair; nothing is
    persisted anywhere.
    """
    if image is None:
        return "❌ **Error**: Please capture or upload a photo first.", ""

    # Mirror the record a real save would write, for the preview pane.
    mock_record = dict(
        image="image.jpg",
        latitude=lat,
        longitude=lon,
        accuracy_m=accuracy_m,
        device_timestamp=device_ts,
        status="Saving Disabled",
    )

    return (
        "✅ **Test Save Successful!** (No data saved to HF dataset)",
        json.dumps(mock_record, indent=2),
    )
154
+
155
# Aliases: the UI wiring below refers to "placeholder_*" names while the
# actual implementations live above; keep both names pointing at the same
# callables.
placeholder_time_capture = handle_time_capture

placeholder_save_action = save_to_dataset
160
+
161
+ # ==============================================================================
162
+ # CELL 2: MODEL LOADING AND PREDICTION LOGIC
163
+ # ==============================================================================
164
+
165
# --- Model Configuration ---
# NOTE: Swap MODEL_REPO_ID and ZIP_FILENAME to load different models
MODEL_REPO_ID = "ddecosmo/lanternfly_classifier"  # HF repo holding the zipped predictor
ZIP_FILENAME = "autogluon_image_predictor_dir.zip"  # exported AutoGluon predictor archive
CLASS_LABELS = {0: "Lanternfly", 1: "Other Insect", 2: "No Insect"}  # class index -> display label

# Local cache/extract dirs
CACHE_DIR = pathlib.Path("hf_assets")
EXTRACT_DIR = CACHE_DIR / "predictor_native"
PREDICTOR = None  # Initialized below (stays None if loading fails)
175
+
176
+ # Download & load the native predictor
177
def _prepare_predictor_dir() -> str:
    """Download the zipped AutoGluon predictor from HF and unpack it.

    Returns the path of the directory MultiModalPredictor.load should read.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)

    # Authenticate with HF_TOKEN when present (private model repos).
    local_zip = huggingface_hub.hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        token=os.getenv("HF_TOKEN", None),
        local_dir=str(CACHE_DIR),
        local_dir_use_symlinks=False,
    )

    # Re-extract from scratch so stale files from an earlier run never linger.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(local_zip, "r") as archive:
        archive.extractall(str(EXTRACT_DIR))

    # AutoGluon exports commonly wrap everything in one nested directory;
    # if so, point the loader at that inner directory.
    entries = list(EXTRACT_DIR.iterdir())
    if len(entries) == 1 and entries[0].is_dir():
        return str(entries[0])
    return str(EXTRACT_DIR)
202
+
203
# Load the model only once, at import time; the UI reads PREDICTOR_LOAD_STATUS
# to report the outcome.
PREDICTOR_LOAD_STATUS = "Attempting to load AutoGluon Predictor..."
try:
    PREDICTOR_DIR = _prepare_predictor_dir()
    PREDICTOR = autogluon.multimodal.MultiModalPredictor.load(PREDICTOR_DIR)
    PREDICTOR_LOAD_STATUS = "✅ AutoGluon Predictor loaded successfully."
    print(PREDICTOR_LOAD_STATUS)
except Exception as e:
    # Broad catch is deliberate: any download/extract/load failure should
    # leave the app runnable with a visible warning, not crash the Space.
    PREDICTOR_LOAD_STATUS = f"❌ Failed to load AutoGluon Predictor: {e}"
    print(PREDICTOR_LOAD_STATUS)
    # Set PREDICTOR to None so prediction function can handle the failure gracefully
    PREDICTOR = None
215
+
216
+
217
def do_predict(pil_img: PIL.Image.Image):
    """Classify an uploaded image with the loaded MultiModalPredictor.

    Args:
        pil_img: Image from the shared Gradio input (None when cleared).

    Returns:
        tuple: (label->probability dict for gr.Label, summary string for the
        textbox). Always exactly two values — the Gradio handler binds two
        outputs, and the original error paths wrongly returned three.
    """
    # Fail soft: the app still renders when the model could not be loaded.
    if PREDICTOR is None:
        return {"Error": 1.0}, "Model not loaded. Check logs."

    if pil_img is None:
        return {"No Image": 1.0}, "No image provided."

    # AutoGluon's predict_proba consumes file paths via a DataFrame, so stage
    # the PIL image in a throwaway directory — and remove it afterwards
    # (the original leaked one temp dir per prediction).
    tmpdir = pathlib.Path(tempfile.mkdtemp())
    try:
        img_path = tmpdir / "input.png"
        pil_img.save(img_path)
        frame = pandas.DataFrame({"image": [str(img_path)]})
        proba_df = PREDICTOR.predict_proba(frame)
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)

    # Map raw class columns onto human-readable labels.
    proba_df = proba_df.rename(columns=CLASS_LABELS)
    row = proba_df.iloc[0]

    # Format result for the Gradio Label component; missing labels get 0.0.
    pretty_dict = {
        label: float(row.get(label, 0.0)) for label in CLASS_LABELS.values()
    }

    # One-line "label: prob" summary across all classes.
    confidence_info = ", ".join(
        f"{label}: {prob:.2f}" for label, prob in pretty_dict.items()
    )

    return pretty_dict, confidence_info
252
+
253
+ # ==============================================================================
254
+ # CELL 4: KERNEL DENSITY ESTIMATION (KDE) CORE LOGIC
255
+ # Must be run after Cell 1 (Imports)
256
+ # ==============================================================================
257
+
258
+ # --- Necessary Imports for KDE (mostly pulled from the provided prototype) ---
259
+ from scipy.stats import gaussian_kde
260
+ import numpy as np
261
+ import os
262
+ import matplotlib.pyplot as plt
263
+ import matplotlib.cm as cm
264
+ import folium
265
+ import matplotlib.colors
266
+ import pandas as pd
267
+ from PIL import Image
268
+ import io
269
+ from folium import Marker # We need Marker for plotting points
270
+
271
# --- Organized version #1: Define Pittsburgh Coordinate Range ---
# Bounding box used both for synthetic data generation and the KDE heatmap.
pittsburgh_lat_min, pittsburgh_lat_max = 40.3, 40.6
pittsburgh_lon_min, pittsburgh_lon_max = -80.2, -79.8
pittsburgh_lat = 40.4406  # Example center latitude (downtown Pittsburgh)
pittsburgh_lon = -79.9959  # Example center longitude

# Number of points sampled for each synthetic distribution below.
num_points = 500
280
+
281
+ # --- Organized version #2: Generate and save temporary CSV files ---
282
+
283
+ # Helper functions for generating different spatial distributions
284
def generate_uniform_points(lat_min, lat_max, lon_min, lon_max, num_points):
    """Sample `num_points` coordinates uniformly inside the bounding box."""
    return pd.DataFrame({
        'latitude': np.random.uniform(lat_min, lat_max, num_points),
        'longitude': np.random.uniform(lon_min, lon_max, num_points),
    })
288
+
289
def generate_normal_points(center_lat, center_lon, lat_std, lon_std, num_points):
    """Sample a Gaussian cluster around a center; points falling outside the
    Pittsburgh bounding box are dropped (so fewer than `num_points` may
    be returned)."""
    lat_samples = np.random.normal(center_lat, lat_std, num_points)
    lon_samples = np.random.normal(center_lon, lon_std, num_points)
    in_bounds = (
        (lat_samples >= pittsburgh_lat_min) & (lat_samples <= pittsburgh_lat_max)
        & (lon_samples >= pittsburgh_lon_min) & (lon_samples <= pittsburgh_lon_max)
    )
    return pd.DataFrame({'latitude': lat_samples[in_bounds],
                         'longitude': lon_samples[in_bounds]})
294
+
295
def generate_bimodal_points(center1_lat, center1_lon, center2_lat, center2_lon, lat_std, lon_std, num_points):
    """Sample a two-cluster (bimodal) Gaussian mixture; points outside the
    Pittsburgh bounding box are dropped.

    The first mode receives num_points // 2 samples, the second the rest.
    """
    half = num_points // 2
    # Draws intentionally happen in the same order as before (mode 1 lat,
    # mode 1 lon, mode 2 lat, mode 2 lon) to keep RNG streams identical.
    lats_a = np.random.normal(center1_lat, lat_std, half)
    lons_a = np.random.normal(center1_lon, lon_std, half)
    lats_b = np.random.normal(center2_lat, lat_std, num_points - half)
    lons_b = np.random.normal(center2_lon, lon_std, num_points - half)

    lats = np.concatenate([lats_a, lats_b])
    lons = np.concatenate([lons_a, lons_b])
    keep = (
        (lats >= pittsburgh_lat_min) & (lats <= pittsburgh_lat_max)
        & (lons >= pittsburgh_lon_min) & (lons <= pittsburgh_lon_max)
    )
    return pd.DataFrame({'latitude': lats[keep], 'longitude': lons[keep]})
305
+
306
def generate_poisson_like_points(lat_min, lat_max, lon_min, lon_max, num_points, num_clusters=10, cluster_std=0.01):
    """Approximate a clustered (Poisson-process-like) spatial pattern.

    Uniformly places `num_clusters` centers inside the box (inset by one
    cluster_std), surrounds each with a small Gaussian blob, then drops any
    out-of-bounds points.
    """
    per_cluster = num_points // num_clusters
    centers_lat = np.random.uniform(lat_min + cluster_std, lat_max - cluster_std, num_clusters)
    centers_lon = np.random.uniform(lon_min + cluster_std, lon_max - cluster_std, num_clusters)

    lat_chunks, lon_chunks = [], []
    for c_lat, c_lon in zip(centers_lat, centers_lon):
        lat_chunks.append(np.random.normal(c_lat, cluster_std, per_cluster))
        lon_chunks.append(np.random.normal(c_lon, cluster_std, per_cluster))

    lats = np.concatenate(lat_chunks)
    lons = np.concatenate(lon_chunks)
    keep = (lats >= lat_min) & (lats <= lat_max) & (lons >= lon_min) & (lons <= lon_max)
    return pd.DataFrame({'latitude': lats[keep], 'longitude': lons[keep]})
320
+
321
# Generate and save all datasets.
# NOTE: this runs at import time, so every app start regenerates fresh random
# data — the KDE tab shows a warning to that effect.
uniform_df = generate_uniform_points(pittsburgh_lat_min, pittsburgh_lat_max, pittsburgh_lon_min, pittsburgh_lon_max, num_points)
normal_df = generate_normal_points(pittsburgh_lat, pittsburgh_lon, 0.05, 0.05, num_points)
bimodal_center1_lat, bimodal_center1_lon = 40.4, -80.1
bimodal_center2_lat, bimodal_center2_lon = 40.5, -79.9
bimodal_df = generate_bimodal_points(bimodal_center1_lat, bimodal_center1_lon, bimodal_center2_lat, bimodal_center2_lon, 0.03, 0.03, num_points)
poisson_like_df = generate_poisson_like_points(pittsburgh_lat_min, pittsburgh_lat_max, pittsburgh_lon_min, pittsburgh_lon_max, num_points)

csv_dir = "spatial_data"
os.makedirs(csv_dir, exist_ok=True)

# Distribution display name -> CSV path; this dict also supplies the
# dropdown choices in the KDE tab.
distribution_files = {
    "Uniform": os.path.join(csv_dir, "uniform_coords.csv"),
    "Normal": os.path.join(csv_dir, "normal_coords.csv"),
    "Bimodal": os.path.join(csv_dir, "bimodal_coords.csv"),
    "Poisson-like": os.path.join(csv_dir, "poisson_like_coords.csv")
}

uniform_df.to_csv(distribution_files["Uniform"], index=False)
normal_df.to_csv(distribution_files["Normal"], index=False)
bimodal_df.to_csv(distribution_files["Bimodal"], index=False)
poisson_like_df.to_csv(distribution_files["Poisson-like"], index=False)

print("✅ Sample spatial data files generated and saved to 'spatial_data' directory.")
345
+
346
+
347
+ # --- Organized version #3 & #4: KDE Calculation and Plotting Functions ---
348
+
349
def load_data_and_calculate_kde(distribution_name):
    """Read the CSV for `distribution_name` and fit a 2-D gaussian KDE.

    Returns:
        (latitudes, longitudes, kde_object, error): on success `error` is
        None; on any failure the first three slots are None and `error`
        carries a message for the UI.
    """
    file_path = distribution_files.get(distribution_name)
    if file_path is None:
        return None, None, None, f"Error: Unknown distribution name '{distribution_name}'"

    try:
        frame = pd.read_csv(file_path)
        if not {'latitude', 'longitude'}.issubset(frame.columns):
            return None, None, None, "Error: CSV must contain 'latitude' and 'longitude' columns."

        lats = frame['latitude'].values
        lons = frame['longitude'].values
        # gaussian_kde expects shape (n_dims, n_points); [lon, lat] order is
        # used consistently with plot_kde_and_points.
        density = gaussian_kde(np.vstack([lons, lats]))
        return lats, lons, density, None

    except Exception as e:
        return None, None, None, f"Error loading data or calculating KDE: {e}"
369
+
370
+
371
def plot_kde_and_points(min_lat, max_lat, min_lon, max_lon, original_latitudes, original_longitudes, kde_object):
    """Generates a static KDE heatmap (Matplotlib) and an interactive Folium map.

    Args:
        min_lat, max_lat, min_lon, max_lon: bounding box for the heatmap grid.
        original_latitudes, original_longitudes: raw sample coordinates.
        kde_object: fitted scipy.stats.gaussian_kde over [lon, lat] stacks.

    Returns:
        (PIL.Image of the heatmap, HTML string of the Folium point map).
    """

    # --- 1. Matplotlib Static Heatmap ---
    # Evaluate the KDE on a 100x100 grid spanning the bounding box.
    x, y = np.mgrid[min_lon:max_lon:100j, min_lat:max_lat:100j]
    positions = np.vstack([x.ravel(), y.ravel()])
    z = kde_object(positions)
    z = z.reshape(x.shape)
    # Normalize to [0, 1]; guard against a flat density surface (max == min).
    z_normalized = (z - z.min()) / (z.max() - z.min()) if z.max() > z.min() else np.zeros_like(z)

    fig, ax = plt.subplots(figsize=(8, 8))
    # Transposed because mgrid's first axis is longitude (x), while imshow
    # treats rows as the vertical (latitude) axis.
    im = ax.imshow(z_normalized.T, origin='lower',
                   extent=[min_lon, max_lon, min_lat, max_lat],
                   cmap='hot', aspect='auto')
    fig.colorbar(im, ax=ax, label='Density')
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_title('Kernel Density Estimate Heatmap (Static)')

    # Convert plot to PIL Image via an in-memory PNG buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight')
    buf.seek(0)
    pil_image = Image.open(buf)
    plt.close(fig)  # free the figure so repeated calls don't accumulate

    # --- 2. Folium Interactive Map with Colored Points ---
    # Color each original sample by its density under the fitted KDE.
    original_coordinates = np.vstack([original_longitudes, original_latitudes])
    density_at_original_points = kde_object(original_coordinates)
    density_min = density_at_original_points.min()
    density_max = density_at_original_points.max()
    # +1e-9 avoids division by zero when all densities are equal.
    density_normalized = (density_at_original_points - density_min) / (density_max - density_min + 1e-9)

    # NOTE(review): cm.get_cmap is deprecated/removed in newer matplotlib
    # (matplotlib.colormaps['viridis'] is the replacement) — confirm the
    # pinned matplotlib version supports it.
    colormap = cm.get_cmap('viridis')
    map_center_lat = np.mean(original_latitudes)
    map_center_lon = np.mean(original_longitudes)
    m_colored_points = folium.Map(location=[map_center_lat, map_center_lon], zoom_start=10)

    for lat, lon, density_norm in zip(original_latitudes, original_longitudes, density_normalized):
        color = matplotlib.colors.rgb2hex(colormap(density_norm))
        folium.CircleMarker(
            location=[lat, lon],
            radius=5,
            color=color,
            fill=True,
            fill_color=color,
            fill_opacity=0.7,
            # The tooltip re-evaluates the KDE once per point (extra work,
            # but only at map-build time).
            tooltip=f"Density: {kde_object([lon, lat])[0]:.4f}"
        ).add_to(m_colored_points)

    # Convert Folium map to an embeddable HTML string for gr.HTML.
    colored_points_map_html = m_colored_points._repr_html_()

    return pil_image, colored_points_map_html
425
+
426
+ # Define the main function that will be called by Gradio
427
def update_visualization(distribution_name):
    """Gradio callback: build both KDE views for the chosen distribution.

    Returns:
        (static PIL heatmap or None, folium/error HTML, error text) —
        error text is "" on success.
    """
    lats, lons, density, error = load_data_and_calculate_kde(distribution_name)

    if error:
        # Surface the failure both in the HTML panel and the error textbox.
        return None, f"<h2>Error</h2><p>{error}</p>", error

    # Render using the fixed Pittsburgh bounding box.
    heatmap_image, folium_html = plot_kde_and_points(
        pittsburgh_lat_min, pittsburgh_lat_max, pittsburgh_lon_min, pittsburgh_lon_max,
        lats, lons, density,
    )
    return heatmap_image, folium_html, ""
442
+
443
+ # =====================================================================================
444
+ # CELL 4: GRADIO UI DEFINITIONS (Three Tabs)
445
+ # =====================================================================================
446
+
447
+ # UPDATED: Accept the shared image component as an argument
448
def field_capture_ui(camera):
    """Build the data-logging column of the Capture tab.

    Args:
        camera: the shared gr.Image component created by the caller; its
            current value is read when the Save button is clicked.

    Returns:
        (status, preview): the Markdown status area and JSON preview
        components, so the caller can wire further events if needed.
    """
    with gr.Blocks():
        gr.Markdown("# 🦋 Lanternfly Data Logging")
        gr.Markdown("Input location data for the uploaded photo. GPS functionality is now enabled!")

        with gr.Column(scale=1):

            gr.Markdown("### 📍 Location Data")
            gr.Markdown("Click 'Get GPS' to automatically capture your location, or manually enter coordinates.")

            # GPS button: its click runs only browser-side JS (fn=None below).
            gps_btn = gr.Button("📍 Get GPS", variant="primary", elem_id="gps_btn_id")

            # Hidden textbox: the JS from get_gps_js writes GPS JSON here and
            # fires an 'input' event, which triggers handle_gps_location.
            hidden_gps_input = gr.Textbox(visible=False, elem_id="hidden_gps_input")

            with gr.Row():
                lat_box = gr.Textbox(label="Latitude", interactive=True, value="0.0", elem_id="lat")
                lon_box = gr.Textbox(label="Longitude", interactive=True, value="0.0", elem_id="lon")

            with gr.Row():
                accuracy_box = gr.Textbox(label="Accuracy (meters)", interactive=True, value="0.0", elem_id="accuracy")
                device_ts_box = gr.Textbox(label="Device Timestamp", interactive=True, elem_id="device_ts")

            time_btn = gr.Button("🕐 Get Current Time", variant="secondary")
            save_btn = gr.Button("💾 Save (Test Mode)", variant="secondary")

            status = gr.Markdown("🔄 **Ready. Saving is in test mode.**")
            preview = gr.JSON(label="Preview JSON", visible=True)

            # GPS Button: fn=None means the click only executes the JS snippet.
            gps_btn.click(
                fn=None, inputs=[], outputs=[], js=get_gps_js()
            )

            # Fan the parsed GPS payload out into the five location fields.
            hidden_gps_input.change(
                fn=handle_gps_location,
                inputs=[hidden_gps_input],
                outputs=[status, lat_box, lon_box, accuracy_box, device_ts_box]
            )

            time_btn.click(
                fn=placeholder_time_capture,
                inputs=[],
                outputs=[status, device_ts_box]
            )

            # The Save button reads the passed-in shared 'camera' component.
            save_btn.click(
                fn=placeholder_save_action,
                inputs=[camera, lat_box, lon_box, accuracy_box, device_ts_box],
                outputs=[status, preview]
            )

    # Return the output components needed by the main app structure
    return status, preview
508
+
509
+ # UPDATED: Accept the shared image component as an argument
510
def image_model_ui(image_in):
    """Build the classification-results column of the Capture tab.

    Args:
        image_in: the shared gr.Image component; any change to its value
            triggers a prediction via do_predict.
    """
    with gr.Blocks():
        gr.Markdown("# 🤖 Image Classification Results")
        gr.Markdown("Uses an AutoGluon multimodal model to classify the uploaded image.")

        # Surface a model-load failure in the UI instead of failing silently.
        if PREDICTOR is None:
            gr.Warning(PREDICTOR_LOAD_STATUS)

        with gr.Row():
            proba_pretty = gr.Label(num_top_classes=2, label="Class Probabilities")
            confidence_output = gr.Textbox(label="Prediction Summary")

        # Attach prediction logic to the passed-in image component
        image_in.change(
            fn=do_predict,
            inputs=[image_in],
            outputs=[proba_pretty, confidence_output]
        )

        # NOTE(review): these /content/... paths are Colab-specific and will
        # not exist on a Hugging Face Space — confirm or replace the examples.
        gr.Examples(
            examples=["/content/hf_assets/predictor_native/image/0.png", "/content/hf_assets/predictor_native/image/1.png"],
            inputs=[image_in],
            label="Representative Examples (Files must be present after model download)",
            examples_per_page=2,
            cache_examples=False,
        )
538
+
539
def kde_analysis_ui():
    """Build the Spatial Analysis tab: a distribution picker plus a static
    KDE heatmap and an interactive Folium map, refreshed on selection."""
    distribution_choices = list(distribution_files.keys())

    with gr.Blocks():
        gr.Markdown("# 🗺️ Spatial Analysis (KDE)")
        gr.Markdown("Visualizes the Kernel Density Estimate (KDE) for different synthetic spatial distributions around Pittsburgh.")

        gr.Warning("Data generation occurs on app load and is randomized.")

        dropdown = gr.Dropdown(
            choices=distribution_choices,
            label="Select Spatial Distribution",
            value=distribution_choices[0]
        )

        with gr.Row():
            static_map = gr.Image(label="Static Kernel Density Map (Matplotlib)")
            interactive_map = gr.HTML(label="Interactive Points Map Colored by KDE (Folium)")

        # Hidden until an error occurs; update_visualization fills it.
        error_box = gr.Textbox(label="Error Message", visible=False)

        # Re-render both maps whenever a new distribution is selected.
        dropdown.change(
            fn=update_visualization,
            inputs=[dropdown],
            outputs=[static_map, interactive_map, error_box]
        )
566
+
567
+
568
+ # =====================================================================================
569
+ # MAIN APP LAUNCH
570
+ # =====================================================================================
571
+
572
+ # Define the final application container with two main tabs
573
# Define the final application container with two main tabs
with gr.Blocks(title="Unified Lanternfly App") as app:

    # TAB 1: COMBINED CAPTURE AND CLASSIFICATION
    with gr.Tab("Capture & Classification"):
        gr.Info("GPS functionality is now enabled! Data saving is in test mode.")

        # Single shared image input: the field-capture column reads it on
        # Save, and the model column predicts on every change.
        shared_image_input = gr.Image(
            streaming=False, height=380, label="📷 Upload Photo (or use camera)",
            type="pil", sources=["webcam", "upload"]
        )

        # Lay out the two UI blocks side-by-side under the shared image.
        with gr.Row():
            with gr.Column(scale=1):
                field_capture_ui(shared_image_input)

            with gr.Column(scale=1):
                # Pass the shared input to the model UI
                image_model_ui(shared_image_input)

    # TAB 2: KDE ANALYSIS
    with gr.Tab("Spatial Analysis (KDE)"):
        # 1. Hidden components exist only to satisfy app.load()'s signature;
        # the visible equivalents are created inside kde_analysis_ui.
        dropdown = gr.Dropdown(
            choices=list(distribution_files.keys()),
            value=list(distribution_files.keys())[0],
            visible=False # Hidden because we redefine it in kde_analysis_ui
        )
        static_map_out = gr.Image(visible=False)
        interactive_map_out = gr.HTML(visible=False)
        error_box_out = gr.Textbox(visible=False)

        # 2. Render the KDE UI (which defines its own visible components)
        kde_analysis_ui()

    # Trigger initial KDE load using the top-level app.load() event.
    # NOTE(review): this writes to the hidden dummy components, not the
    # visible maps inside kde_analysis_ui — the visible tab stays empty until
    # the user changes the dropdown; confirm this is intended.
    app.load(
        fn=update_visualization,
        inputs=[dropdown], # Pass the default value from the hidden dropdown
        outputs=[static_map_out, interactive_map_out, error_box_out], # Dummy outputs to satisfy the call
        queue=False
    )

if __name__ == "__main__":
    app.launch()
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ autogluon.multimodal
3
+
4
+ # --- Scientific, Data, and Utility Dependencies ---
5
+ pandas
6
+ numpy
7
+ scipy
8
+ Pillow
9
+ huggingface-hub
10
+ matplotlib
11
+ folium