Commit f4cd313: Upload all code
gghg1989 committed Feb 15, 2020
1 parent fd75358
Showing 76 changed files with 5,860 additions and 0 deletions.
20 changes: 20 additions & 0 deletions .gitignore
@@ -0,0 +1,20 @@
.DS_Store
*/.DS_Store
Thumbs.db
node_modules/*
*.idea
*~
package-lock.json
.vscode
.idea/
.idea/*
.venv
venv
house*/*
log/
data/
*.ipynb
*/*.ipynb
.ipynb_checkpoints
*/.ipynb_checkpoints
output*/
9 changes: 9 additions & 0 deletions README.md
@@ -4,5 +4,14 @@ Smart cities, utilities, third-parties, and government agencies are having press

To address the problem, we design a new system, "SolarFinder", that can automatically detect distributed solar photovoltaic arrays in a given geospatial region without any extra cost. SolarFinder first automatically fetches low/regular resolution satellite images within the region using publicly-available imagery APIs. Then, SolarFinder leverages a multi-dimensional K-means algorithm to automatically segment solar arrays on rooftop images. Eventually, SolarFinder employs a hybrid linear regression approach that integrates support vector machine (SVM-RBF) modeling with a deep convolutional neural network (CNN) approach to accurately identify solar arrays and characterize each solar deployment simultaneously. We evaluate SolarFinder using 269,632 public satellite images that include 1,143,636 contours from 13 geospatial regions in the U.S. We find that pre-trained SolarFinder yields an MCC of 0.17, which is 3 times better than the most recent pre-trained CNN approach and is the same as a re-trained CNN approach.
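
For intuition, here is a minimal sketch of the multi-dimensional K-means segmentation step. It is illustrative only, not the exact SolarFinder implementation: the input file name `roof.png`, the (B, G, R, x, y) pixel feature vector, and `k=4` are all assumptions.

```python
# Minimal multi-dimensional K-means segmentation sketch (illustrative).
# Assumption: "roof.png" is a cropped rooftop image such as those produced
# by the data_collection scripts below.
import cv2
import numpy as np
from sklearn.cluster import KMeans

img = cv2.imread("roof.png")
h, w = img.shape[:2]

# One feature row per pixel: BGR color plus scaled spatial coordinates.
ys, xs = np.mgrid[0:h, 0:w]
features = np.column_stack([
    img.reshape(-1, 3).astype(np.float32),
    (xs.reshape(-1) * 255.0 / w).astype(np.float32),
    (ys.reshape(-1) * 255.0 / h).astype(np.float32),
])

labels = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(features)

# Write each cluster out as a binary mask; dark, homogeneous clusters are
# candidate solar-array segments.
for k in range(4):
    mask = (labels.reshape(h, w) == k).astype(np.uint8) * 255
    cv2.imwrite("segment_%d.png" % k, mask)
```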

## Prerequisite Environment

* python3
* pip (latest version)
* Install dependencies:
```sh
$ pip install -r requirements.txt
```

The SolarFinder work is published in the proceedings of the 19th ACM/IEEE Conference on Information Processing in Sensor Networks (IPSN 2020).
If you use our code or datasets in your research, please consider citing our work:
82 changes: 82 additions & 0 deletions data_collection/osm.py
@@ -0,0 +1,82 @@
# This file generates the URLs used to download images from the Google Static Maps API.
# For every house, we store the URLs to download the original house image and the house mask.
# The input is an OSM file; the output is a JSON file storing the URLs and a CSV file
# storing the id and location of each house.
import csv
import datetime
import glob as gb
import json
import os
import xml.dom.minidom

# used to measure the image download time
start = datetime.datetime.now()
# osm_path is the location of the OSM files
osm_path = gb.glob("/*.osm")
for osm in osm_path:
    dom = xml.dom.minidom.parse(osm)
    num = osm.split("/")[-1]
    num = os.path.splitext(num)[0]
    # dom = xml.dom.minidom.parse('./0.osm')
    root = dom.documentElement
    nodelist = root.getElementsByTagName('node')
    waylist = root.getElementsByTagName('way')
    node_dic = {}

    url_prefix1 = 'https://maps.googleapis.com/maps/api/staticmap?zoom=20&size=400x400&scale=4&maptype=hybrid&path=color:0xff0000ff%7Cweight:5%7Cfillcolor:0xff0000ff'
    url_prefix2 = 'https://maps.googleapis.com/maps/api/staticmap?zoom=20&size=400x400&scale=4&maptype=hybrid&path=color:0x00000000%7Cweight:5%7Cfillcolor:0x00000000'
    url_suffix = '&key=AIzaSyA7UVGBz0YP8OPQnQ9Suz69_u1TUSDukt8'

    # map each node id to its (latitude, longitude)
    for node in nodelist:
        node_id = node.getAttribute('id')
        node_lat = float(node.getAttribute('lat'))
        node_lon = float(node.getAttribute('lon'))
        node_dic[node_id] = (node_lat, node_lon)
    url = []
    location = {}
    csv_lat = 0
    csv_lon = 0
    num_img = 0
    # JSON file used to store the image download URLs
    with open(os.path.join('./10house/house1/', format(str(num)) + '.json'), 'w') as json_file:
        for way in waylist:
            taglist = way.getElementsByTagName('tag')
            build_flag = False
            for tag in taglist:
                # keep only ways tagged as buildings
                if tag.getAttribute('k') == 'building':
                    build_flag = True
            if build_flag:
                ndlist = way.getElementsByTagName('nd')
                s = ""
                # append each corner of the building outline to the URL path
                for nd in ndlist:
                    nd_id = nd.getAttribute('ref')
                    if nd_id in node_dic:
                        node_lat = node_dic[nd_id][0]
                        node_lon = node_dic[nd_id][1]
                        g = nd_id
                        csv_lat = node_dic[nd_id][0]
                        csv_lon = node_dic[nd_id][1]
                        print(g)
                        s += '%7C' + str(node_lat) + '%2C' + str(node_lon)
                # secret = 'pSRLFZI7ujDivoNjR-Vz7GR6F4Q='
                url1 = url_prefix1 + s + url_suffix
                # url1 = sign_url(url1, secret)
                url2 = url_prefix2 + s + url_suffix
                # url2 = sign_url(url2, secret)
                test_dict = {"id": g, "mask": url1, "image": url2}
                url.append(test_dict)
                location[g] = str(csv_lat) + ',' + str(csv_lon)
                num_img = num_img + 1
        json_str = json.dumps(url)
        json_file.write(json_str)
    # CSV file used to store the house id and location (latitude and longitude)
    csv_path = "./10house/house1/house1.csv"
    with open(csv_path, 'a') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in location.items():
            writer.writerow([key, value])
end = datetime.datetime.now()
print(end - start)
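
For reference, each entry in the JSON written above has the following shape, since the script dumps a list of `{"id", "mask", "image"}` dicts. The id and coordinates below are made up for illustration, and the long query strings are abbreviated with "...":

```json
[
  {
    "id": "4848426370",
    "mask": "https://maps.googleapis.com/maps/api/staticmap?...&path=color:0xff0000ff%7C...%7C42.38%2C-72.52&key=...",
    "image": "https://maps.googleapis.com/maps/api/staticmap?...&path=color:0x00000000%7C...%7C42.38%2C-72.52&key=..."
  }
]
```
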
42 changes: 42 additions & 0 deletions data_collection/roof.py
@@ -0,0 +1,42 @@
# This file downloads roof images from the Google Static Maps API.
# We download the original image and the red building mask (free tier), then
# AND the two images together to extract the roof ROI, so we can process and
# label the roof images.
import glob as gb
import json
import os

import cv2
import numpy as np
import requests

i = 0
json_path = gb.glob("./10house/house1/map.json")
for file in json_path:
    with open(file, 'r') as file:
        urls = json.load(file)
        for url in urls:
            i = i + 1
            id = url['id']
            mask = url['mask']
            image = url['image']
            mask = requests.get(mask)
            image = requests.get(image)
            # note: the mask response is written under image/ and the plain
            # image under mask/; the reads below follow the same convention
            fmask = open(os.path.join('./10house/house1/image/', format(str('1')) + '.png'), 'ab')
            fimg = open(os.path.join('./10house/house1/mask/', format(str('1')) + '.png'), 'ab')
            fmask.write(mask.content)
            fimg.write(image.content)
            fmask.close()
            fimg.close()
            tag = cv2.imread(os.path.join('./10house/house1/image/', format('1') + '.png'))
            real = cv2.imread(os.path.join('./10house/house1/mask/', format('1') + '.png'))
            # keep only pixels whose BGR values fall within the red mask range
            lower = np.array([0, 0, 100])
            upper = np.array([40, 40, 255])
            img = cv2.inRange(tag, lower, upper)

            # AND the binary mask with the real image to extract the roof ROI
            img = np.expand_dims(img, axis=2)
            img = np.concatenate((img, img, img), axis=-1)
            result = cv2.bitwise_and(real, img)
            cv2.imwrite(os.path.join('./10house/house1/roof/' + format(str(id)) + '.png'), result)
            os.remove("./10house/house1/image/1.png")
            os.remove("./10house/house1/mask/1.png")
32 changes: 32 additions & 0 deletions data_collection/sign_url.py
@@ -0,0 +1,32 @@
import base64
import hashlib
import hmac
from urllib.parse import urlparse


# Sign the URL so there is no quota limit when downloading images from the
# Google Static Maps API; note that signed requests may incur extra fees.
def sign_url(input_url=None, secret=None):
    if not input_url or not secret:
        raise Exception("Both input_url and secret are required")

    url = urlparse(input_url)

    # We only need to sign the path+query part of the string
    url_to_sign = url.path + "?" + url.query

    # Decode the URL-safe base64-encoded private key into its binary format
    decoded_key = base64.urlsafe_b64decode(secret)

    # Create a signature of the URL string using HMAC-SHA1 and the private
    # key. This signature will be binary.
    signature = hmac.new(decoded_key, url_to_sign.encode(), hashlib.sha1)

    # Encode the binary signature into base64 for use within a URL
    encoded_signature = base64.urlsafe_b64encode(signature.digest())

    original_url = url.scheme + "://" + url.netloc + url.path + "?" + url.query

    # Return the signed URL
    return original_url + "&signature=" + encoded_signature.decode()
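
A short usage sketch for sign_url (illustrative: it assumes data_collection is on the import path, and the secret is fabricated just so the example runs; real signing secrets come from the Google Cloud console):

```python
import base64
from sign_url import sign_url

# Fabricated, syntactically valid url-safe base64 secret (NOT a real key).
fake_secret = base64.urlsafe_b64encode(b"not-a-real-secret").decode()
url = ("https://maps.googleapis.com/maps/api/staticmap"
       "?zoom=20&size=400x400&maptype=hybrid&center=42.38,-72.52")
print(sign_url(input_url=url, secret=fake_secret))
# -> the same URL with "&signature=..." appended
```
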
19 changes: 19 additions & 0 deletions data_preprocessing/add_angle70.py
@@ -0,0 +1,19 @@
import pandas as pd


# Append the "numangle70" column from contour_all.csv to the base feature CSV.
data = pd.read_csv(".csv")
df = pd.DataFrame(data)
data1 = pd.read_csv("/contour_all.csv")
df1 = pd.DataFrame(data1)

angle70 = df1.iloc[:, 13]
df.insert(13, "numangle70", angle70, True)

export_csv = df.to_csv('/location810/angle70.csv', index=None)


