Add parse script

main
Gašper Spagnolo 2023-03-20 16:59:01 +01:00
parent 81534dd112
commit 8614fd7312
5 changed files with 64930 additions and 13 deletions

View File

@ -30,6 +30,10 @@ IMG_H = 160 # On better gpu use 256 and adam optimizer
IMG_W = IMG_H * 2
DATASET_PATHS = [
"../../diplomska/datasets/oj/montreal_trial1/ge_images/images/",
#"../../diplomska/datasets/oj/montreal_trial1/teach/images/",
#"../../diplomska/datasets/oj/suffield_trial1/teach/images/",
#"../../diplomska/datasets/oj/utiascircle_trial1/teach/images/",
#"../../diplomska/datasets/oj/utiasday_trial1/teach/images/",
]
# configuring device
@ -154,7 +158,7 @@ class Encoder(nn.Module):
stride=stride,
),
nn.BatchNorm2d(out_channels),
nn.Dropout(0.3),
nn.Dropout(0.4),
act_fn,
nn.Conv2d(
in_channels=out_channels,
@ -163,7 +167,6 @@ class Encoder(nn.Module):
stride=stride,
),
nn.BatchNorm2d(out_channels * 2),
nn.Dropout(0.2),
act_fn,
nn.Conv2d(
in_channels=out_channels * 2,
@ -172,7 +175,7 @@ class Encoder(nn.Module):
stride=stride,
),
nn.BatchNorm2d(out_channels * 4),
nn.Dropout(0.1),
nn.Dropout(0.3),
act_fn,
nn.Conv2d(
in_channels=out_channels * 4,
@ -181,6 +184,7 @@ class Encoder(nn.Module):
stride=stride,
),
nn.BatchNorm2d(out_channels * 8),
nn.Dropout(0.1),
act_fn,
nn.Conv2d(
in_channels=out_channels * 8,
@ -371,9 +375,7 @@ class ConvolutionalAutoencoder:
# reconstructing images
output = self.network(val_images)
# computing validation loss
val_loss = loss_function(
output, val_images.view(-1, 1, IMG_H, IMG_W)
)
val_loss = loss_function(output.flatten(), val_images.flatten())
# --------------
# VISUALISATION
@ -402,9 +404,7 @@ class ConvolutionalAutoencoder:
grid = grid.permute(1, 2, 0)
plt.figure(dpi=170)
plt.title(
f"Original/Reconstructed, training loss: \
{round(loss.item(), 4)} validation loss: \
{round(val_loss.item(), 4)}"
f"Original/Reconstructed, training loss: {round(loss.item(), 4)} validation loss: {round(val_loss.item(), 4)}"
)
plt.imshow(grid)
plt.axis("off")
@ -504,8 +504,7 @@ def preprocess_data():
test_images.extend(test)
print(
f"Training on {len(training_images)} images, validating on \
{len(validation_images)} images, testing on {len(test_images)} images"
f"Training on {len(training_images)} images, validating on {len(validation_images)} images, testing on {len(test_images)} images"
)
# creating pytorch datasets
training_data = GEDataset(
@ -564,7 +563,7 @@ def main():
training_data, validation_data, test_data = preprocess_data()
model = ConvolutionalAutoencoder(Autoencoder(Encoder(), Decoder()))
model.train(
nn.MSELoss(),
nn.MSELoss(reduction="sum"),
epochs=args.epochs,
batch_size=args.batch_size,
training_set=training_data,
@ -576,7 +575,7 @@ def main():
elif args.test:
t, v, td = preprocess_data()
model = ConvolutionalAutoencoder(Autoencoder(Encoder(), Decoder()))
model.test(nn.MSELoss(), td)
model.test(nn.MSELoss(reduction="sum"), td)
elif args.encode:
t, v, td = preprocess_data()

1
data_scrape/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dataset/*

39
data_scrape/bs.py Normal file
View File

@ -0,0 +1,39 @@
"""Generate map-screenshot URLs over a lat/lng grid and write them to urls.txt.

Walks a rectangular bounding box in 0.001-degree steps and emits one URL per
(latitude, longitude, tilt) combination for the local map page
(``index.html``), which renders a Google Maps satellite view at that point.
"""
from itertools import product

import numpy as np

# Bounding box of the area to scrape (degrees).
LAT_START = 45.9887
LAT_END = 46.1339
LNG_START = 14.4431
LNG_END = 14.5910
# Camera tilt angles to capture: 10, 30, 50 degrees.
TILTS = list(range(10, 60, 20))
# NOTE(review): HEADINGS is defined but never used — every generated URL pins
# heading=0. Kept for compatibility; confirm whether multiple headings were
# meant to be generated (the original loop ignored this constant too).
HEADINGS = list(range(0, 360, 90))


def generate_coordinates(
    lat_start=LAT_START,
    lat_end=LAT_END,
    lng_start=LNG_START,
    lng_end=LNG_END,
    step=0.001,
    tilts=None,
):
    """Return the list of screenshot URLs for every grid point and tilt.

    All parameters default to the module-level constants, so existing
    zero-argument callers get byte-identical output (lat-major, then
    lng, then tilt — same iteration order as the original nested loops).

    :param lat_start: inclusive start latitude of the grid (degrees).
    :param lat_end: exclusive end latitude (``np.arange`` semantics).
    :param lng_start: inclusive start longitude.
    :param lng_end: exclusive end longitude.
    :param step: grid spacing in degrees.
    :param tilts: iterable of camera tilt angles; defaults to ``TILTS``.
    :return: list of ``http://localhost:8000/index.html?...`` URL strings.
    """
    if tilts is None:
        tilts = TILTS
    latitudes = np.arange(lat_start, lat_end, step)
    longitudes = np.arange(lng_start, lng_end, step)
    return [
        f"http://localhost:8000/index.html"
        f"?lng={longitude}&lat={latitude}&tilt={tilt}&heading=0"
        for latitude, longitude, tilt in product(latitudes, longitudes, tilts)
    ]


def _write_urls(urls, path="urls.txt"):
    """Write one URL per line to *path* (the with-block closes the file)."""
    with open(path, "w") as output:
        for url in urls:
            output.write(url + "\n")
    # Original code called output.close() here — redundant: the context
    # manager has already closed the file. Removed.


if __name__ == "__main__":
    # Guarded so importing this module no longer writes a ~65k-line file
    # as a side effect; running the script directly behaves as before.
    _write_urls(generate_coordinates())

54
data_scrape/index.html Normal file
View File

@ -0,0 +1,54 @@
<!DOCTYPE html>
<!--
  Minimal satellite-view page used by the data_scrape pipeline.
  It reads lat/lng/tilt/heading from the query string (URLs generated by
  bs.py), centers a chrome-less Google Maps satellite view on that point,
  and then waits ~1 second so an external screenshot tool can capture the
  rendered map.
-->
<html>
<head>
    <!-- NOTE(review): the Maps JavaScript API script src is "#" — a
         placeholder (API key presumably stripped before commit). The onload
         handler below references google.maps, so this page cannot work until
         a real Maps API URL is restored here. Confirm. -->
    <script
        src="#"
        defer></script>
    <script src="https://printjs-4de6.kxcdn.com/print.min.js"></script>
    <script src="https://html2canvas.hertzen.com/dist/html2canvas.min.js"></script>
</head>
<body>
    <!-- Fixed-size map canvas; this region is what gets screenshotted. -->
    <div id="map" style="width: 2000px; height: 1000px;"></div>
</body>
<!-- NOTE(review): this <script> sits after </body> but before </html> —
     browsers tolerate it, but it is not valid HTML placement. -->
<script>
    // Parameter ranges that were originally hard-coded here; they are now
    // supplied per-URL by bs.py, so these stay as reference values only.
    //let lat_start = 45.9887;
    //let lng_start = 14.4431;
    //let lat_end = 46.1339;
    //let lng_end = 14.5910;
    //tilts = [0, 45]; // Tilt goes from 0 to 90
    // headings go from 0 to 360
    //headings = [0, 90, 180, 270];
    // Do this with Python instead (generation moved to bs.py).

    // Parse the camera parameters out of the query string, e.g.
    // ?lng=14.4431&lat=45.9887&tilt=10&heading=0
    const queryString = window.location.search;
    console.log(queryString);
    const urlParams = new URLSearchParams(queryString);
    const lt = parseFloat(urlParams.get('lat'));
    const lg = parseFloat(urlParams.get('lng'));
    const tilt = parseInt(urlParams.get('tilt'));
    const heading = parseInt(urlParams.get('heading'));
    console.log(lt, lg, tilt, heading);

    window.onload = async () => {
        // NOTE(review): `map` is assigned without let/const, creating an
        // implicit global — presumably intentional so devtools can reach it;
        // confirm.
        map = new google.maps.Map(document.getElementById("map"), {
            center: {
                lat: lt,
                lng: lg,
            },
            zoom: 19,
            mapTypeId: "satellite",
            // hide the default map controls
            disableDefaultUI: true,
            // disable watermark hehe
            clickableIcons: false,
        });
        map.setTilt(tilt); // 0, 45
        map.setHeading(heading); // 0, 90, 180, 270
        // Give the tiles time to load before the external tool screenshots.
        await new Promise(r => setTimeout(r, 1000));
    }
</script>
</html>

64824
data_scrape/urls.txt Normal file

File diff suppressed because it is too large Load Diff