parent 4bf0a6b230
commit 1f4d74707a
@@ -15,6 +15,8 @@ from PIL import Image
 import resource
 import argparse
 import pickle
+from multiprocessing import Pool
+from functools import partial

 # -------------
 # MEMORY SAFETY
@@ -29,8 +31,9 @@ resource.setrlimit(resource.RLIMIT_AS, (memory_limit_gb * 1024**3, hard))
 IMG_H = 160  # On a better GPU use 256 and the Adam optimizer
 IMG_W = IMG_H * 2
 DATASET_PATHS = [
     "../data_scrape/dataset/dataset/",
+    "../datasets/train/",
 ]
 LINE = "\n----------------------------------------\n"
 # configuring device
 if torch.cuda.is_available():
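The hunk header above carries the memory-safety line this constant block sits under: resource.setrlimit(resource.RLIMIT_AS, (memory_limit_gb * 1024**3, hard)). A minimal sketch of how that cap behaves; memory_limit_gb and hard mirror the names in the hunk header, while the value and the demo allocation are illustrative:

import resource

# Cap the process's virtual address space (Unix only).
memory_limit_gb = 16  # the real value is defined outside this hunk
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (memory_limit_gb * 1024**3, hard))

# Oversized allocations now fail with MemoryError inside Python
# instead of inviting the OS OOM killer.
try:
    buf = bytearray(2 * memory_limit_gb * 1024**3)
except MemoryError:
    print("allocation refused by RLIMIT_AS")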
@@ -60,39 +63,45 @@ class GEImagePreprocess:
         self.training_set = []
         self.validation_set = []
         self.test_set = []
+        self.entry_paths = []
         self.patch_w = patch_w
         self.patch_h = patch_h

     def load_images(self):
-        self.load_images_recursively(self.path)
-        self.split_dataset()
+        self.get_entry_paths(self.path)
+        load_image_partial = partial(self.load_image_helper)
+        with Pool() as pool:
+            results = pool.map(load_image_partial, self.entry_paths)
+        self.split_dataset(results)
         return self.training_set, self.validation_set, self.test_set

-    def load_images_recursively(self, path):
-        images = os.listdir(path)
-        for image in images:
-            if os.path.isdir(path + image):
-                self.load_images_recursively(path + image + "/")
-            if image.endswith(".jpeg"):
-                img = Image.open(path + image)
-                self.training_set.append(self.preprocess_image(img))
-
-    def split_dataset(self):
-        training_set = []
-        validation_set = []
-        test_set = []
-
-        for image in tqdm(range(len(self.training_set)), desc="Splitting dataset"):
-            if image % 30 == 0:
-                validation_set.append(self.training_set[image])
-            elif image % 30 == 1:
-                test_set.append(self.training_set[image])
-            else:
-                training_set.append(self.training_set[image])
-        self.training_set = training_set
-        self.validation_set = validation_set
-        self.test_set = test_set
-
+    def load_image_helper(self, entry_path):
+        try:
+            img = Image.open(entry_path)
+        except PIL.UnidentifiedImageError as e:
+            print("Could not open an image: ", entry_path)
+            print(e)
+            return None
+        return self.preprocess_image(img)
+
+    def get_entry_paths(self, path):
+        entries = os.listdir(path)
+        for entry in entries:
+            entry_path = path + "/" + entry
+            if os.path.isdir(entry_path):
+                self.get_entry_paths(entry_path + "/")
+            if entry_path.endswith(".jpeg"):
+                self.entry_paths.append(entry_path)
+
+    def split_dataset(self, dataset):
+        for image in tqdm(range(len(dataset)), desc="Splitting dataset"):
+            if image % 30 == 0:
+                self.validation_set.append(dataset[image])
+            elif image % 30 == 1:
+                self.test_set.append(dataset[image])
+            else:
+                self.training_set.append(dataset[image])

     def preprocess_image(self, image):
         image = image.resize((IMG_W, IMG_H))
         image = image.convert("L")
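The rewritten loader splits path discovery from decoding: get_entry_paths collects every .jpeg path, then Pool.map runs load_image_helper across worker processes. Three details worth noting: partial(self.load_image_helper) binds no extra arguments, so it is effectively a plain reference to the method; load_image_helper references PIL.UnidentifiedImageError, which needs import PIL alongside from PIL import Image; and it returns None for unreadable files, which split_dataset does not filter out. A standalone sketch of the same pattern with the None filter added; everything except Pool and partial is illustrative:

from functools import partial
from multiprocessing import Pool

def load_one(target_size, path):
    # Stand-in for load_image_helper: return None when a file cannot be read.
    try:
        with open(path, "rb") as f:
            data = f.read()
    except OSError:
        return None
    return (path, len(data), target_size)

def load_all(paths, target_size=(320, 160)):
    load_partial = partial(load_one, target_size)  # here partial actually binds an argument
    with Pool() as pool:
        results = pool.map(load_partial, paths)
    # Drop failed loads before splitting; the committed code skips this step.
    return [r for r in results if r is not None]

if __name__ == "__main__":
    print(load_all(["a.jpeg", "missing.jpeg"]))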
@@ -490,9 +499,6 @@ def preprocess_data():
     validation_images.extend(val)
     test_images.extend(test)

-    print(
-        f"Training on {len(training_images)} images, validating on {len(validation_images)} images, testing on {len(test_images)} images"
-    )
     # creating pytorch datasets
     training_data = GEDataset(
         training_images,
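For reference, the modulo-30 split above sends index % 30 == 0 to validation and index % 30 == 1 to test, i.e. roughly 1/30 (about 3.3%) of the images each, with the remaining 28/30 (about 93.3%) used for training; those are the counts the removed print used to report. A quick check on a hypothetical dataset size:

n = 3000  # hypothetical number of loaded images
val = sum(1 for i in range(n) if i % 30 == 0)   # 100
test = sum(1 for i in range(n) if i % 30 == 1)  # 100
train = n - val - test                          # 2800
print(train, val, test)  # -> 2800 100 100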
@@ -560,12 +566,12 @@ def main():
         model.store_model()

     elif args.test:
-        t, v, td = preprocess_data()
+        _, _, td = preprocess_data()
         model = ConvolutionalAutoencoder(Autoencoder(Encoder(), Decoder()))
         model.test(nn.MSELoss(reduction="sum"), td)

     elif args.encode:
-        t, v, td = preprocess_data()
+        _, _, td = preprocess_data()
         model = ConvolutionalAutoencoder(Autoencoder(Encoder(), Decoder()))
         model.encode_images(td)
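main() dispatches on args.test and args.encode (and presumably args.train, given the model.store_model() call above), and argparse is among the newly listed imports. The actual parser lives outside this diff, so the following is only a guess at its shape; every flag name here is inferred, not confirmed:

import argparse

parser = argparse.ArgumentParser(description="Convolutional autoencoder for GE imagery")
# Flag names inferred from the attribute accesses in main(); not confirmed by the diff.
parser.add_argument("--train", action="store_true", help="train the model and store it")
parser.add_argument("--test", action="store_true", help="run the test split through the model")
parser.add_argument("--encode", action="store_true", help="encode images with the trained model")
args = parser.parse_args()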
@@ -1 +0,0 @@
-dataset/*
@@ -1,39 +0,0 @@
-LAT_START = 45.9887
-LAT_END = 46.1339
-LNG_START = 14.4431
-LNG_END = 14.5910
-
-TILTS = [i for i in range(10, 60, 20)]
-HEADINGS = [i for i in range(0, 360, 90)]
-
-import numpy as np
-
-## 3
-## 4
-#driver.save_screenshot('screenshot.png')
-## 5
-#driver.quit()
-
-def generate_coordinates():
-    urls = []
-    latitudes = np.arange(LAT_START, LAT_END, 0.001)
-    longitudes = np.arange(LNG_START, LNG_END, 0.001)
-    for latitude in latitudes:
-        for longitude in longitudes:
-            for tilt in TILTS:
-                params = f'?lng={longitude}&lat={latitude}&tilt={tilt}&heading=0'
-                url = f'http://localhost:8000/index.html{params}'
-                urls.append(url)
-
-    return urls
-
-
-urls = generate_coordinates()
-
-# Write the URLs line by line to a text file
-
-with open("urls.txt", "w") as output:
-    for url in urls:
-        output.write(url + "\n")
-
-output.close()
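generate_coordinates sweeps a 0.001-degree grid over the bounding box and emits one URL per (latitude, longitude, tilt) triple. With these constants the grid has 146 latitudes and 148 longitudes, and TILTS is [10, 30, 50], giving 146 * 148 * 3 = 64824 URLs, exactly the size reported for data_scrape/urls.txt at the bottom of this diff:

import numpy as np

lats = np.arange(45.9887, 46.1339, 0.001)  # 146 values
lngs = np.arange(14.4431, 14.5910, 0.001)  # 148 values
tilts = list(range(10, 60, 20))            # [10, 30, 50]
print(len(lats) * len(lngs) * len(tilts))  # 64824, matching urls.txt below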
@@ -1,54 +0,0 @@
-<!DOCTYPE html>
-<html>
-
-<head>
-    <script
-        src="#"
-        defer></script>
-    <script src="https://printjs-4de6.kxcdn.com/print.min.js"></script>
-    <script src="https://html2canvas.hertzen.com/dist/html2canvas.min.js"></script>
-</head>
-
-<body>
-    <div id="map" style="width: 2000px; height: 1000px;"></div>
-</body>
-<script>
-    //let lat_start = 45.9887;
-    //let lng_start = 14.4431;
-    //let lat_end = 46.1339;
-    //let lng_end = 14.5910;
-    //tilts = [0, 45]; // Tilt goes from 0 to 90
-    // headings go from 0 to 360
-    //headings = [0, 90, 180, 270];
-    // Do this later with Python
-    const queryString = window.location.search;
-    console.log(queryString);
-    const urlParams = new URLSearchParams(queryString);
-    const lt = parseFloat(urlParams.get('lat'));
-    const lg = parseFloat(urlParams.get('lng'));
-    const tilt = parseInt(urlParams.get('tilt'));
-    const heading = parseInt(urlParams.get('heading'));
-    console.log(lt, lg, tilt, heading);
-    window.onload = async () => {
-        map = new google.maps.Map(document.getElementById("map"), {
-            center: {
-                lat: lt,
-                lng: lg,
-            },
-            zoom: 19,
-            mapTypeId: "satellite",
-            // hide the default map controls
-            disableDefaultUI: true,
-            // disable watermark hehe
-            clickableIcons: false,
-
-        });
-        map.setTilt(tilt); // 0, 45
-        map.setHeading(heading); // 0, 90, 180, 270
-        await new Promise(r => setTimeout(r, 1000));
-    }
-
-
-</script>

-</html>
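The deleted page reads lat, lng, tilt, and heading from the query string, renders a chrome-free satellite view, and waits a second for tiles to settle. The commented-out driver.save_screenshot and driver.quit lines in the deleted Python file suggest the capture side was Selenium; a minimal sketch of such a loop, assuming Selenium, a local ChromeDriver, and the urls.txt generated above (the output naming is illustrative):

import time
from selenium import webdriver

driver = webdriver.Chrome()
with open("urls.txt") as f:
    for i, url in enumerate(line.strip() for line in f):
        driver.get(url)
        time.sleep(2)  # the page itself waits ~1 s after load; give the tiles time to draw
        driver.save_screenshot(f"dataset/{i:06d}.png")
driver.quit()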
data_scrape/urls.txt (64824 lines): file diff suppressed because it is too large