Boundary Detection with OpenCV

This is a project I worked on a few years ago. The demo shows the end result of using OpenCV for lane detection. The program detects straight road lines using the Hough line transform and masks for the white lane color. The results aren't great, but it's a good starting point for researching and implementing deeper image-processing algorithms for detection.

import numpy as np
from PIL import ImageGrab
import cv2
import time
import pyautogui
from numpy import ones, vstack
from numpy.linalg import lstsq
from directkeys import PressKey, ReleaseKey, W, A, S, D
from statistics import mean


def roi(img, vertices):
    # blank mask:
    mask = np.zeros_like(img)
    # fill pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, 255)
    # return the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked


def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
    # if this fails, go with some default line
    try:
        # find the maximum y value for a lane marker
        # (since we cannot assume the horizon will always be at the same point)
        ys = []
        for i in lines:
            for ii in i:
                ys += [ii[1], ii[3]]
        min_y = min(ys)
        max_y = 600
        new_lines = []
        line_dict = {}

        for idx, i in enumerate(lines):
            for xyxy in i:
                # Modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                # Calculates the definition of a line, given two sets of coords.
                x_coords = (xyxy[0], xyxy[2])
                y_coords = (xyxy[1], xyxy[3])
                A = vstack([x_coords, ones(len(x_coords))]).T
                m, b = lstsq(A, y_coords, rcond=None)[0]

                # Calculating our new, and improved, xs
                x1 = (min_y - b) / m
                x2 = (max_y - b) / m

                line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                new_lines.append([int(x1), min_y, int(x2), max_y])

        # group lines whose slope and intercept are within 20% of each other
        final_lanes = {}
        for idx in line_dict:
            final_lanes_copy = final_lanes.copy()
            m = line_dict[idx][0]
            b = line_dict[idx][1]
            line = line_dict[idx][2]

            if len(final_lanes) == 0:
                final_lanes[m] = [[m, b, line]]
            else:
                found_copy = False
                for other_ms in final_lanes_copy:
                    if not found_copy:
                        if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
                            if abs(final_lanes_copy[other_ms][0][1] * 1.2) > abs(b) > abs(final_lanes_copy[other_ms][0][1] * 0.8):
                                final_lanes[other_ms].append([m, b, line])
                                found_copy = True
                                break
                        else:
                            final_lanes[m] = [[m, b, line]]

        line_counter = {}
        for lanes in final_lanes:
            line_counter[lanes] = len(final_lanes[lanes])

        # keep the two groups with the most members
        top_lanes = sorted(line_counter.items(), key=lambda item: item[1])[::-1][:2]
        lane1_id = top_lanes[0][0]
        lane2_id = top_lanes[1][0]

        def average_lane(lane_data):
            x1s = []
            y1s = []
            x2s = []
            y2s = []
            for data in lane_data:
                x1s.append(data[2][0])
                y1s.append(data[2][1])
                x2s.append(data[2][2])
                y2s.append(data[2][3])
            return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(mean(y2s))

        l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
        l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

        return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2], lane1_id, lane2_id
    except Exception as e:
        print(str(e))


def process_img(image):
    original_image = image
    # edge detection
    processed_img = cv2.Canny(image, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)
    vertices = np.array([[10, 500], [10, 300], [300, 200], [500, 200], [800, 300], [800, 500]], np.int32)
    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    # rho, theta, threshold, min length, max gap:
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180,
                            minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1, m2 = draw_lanes(original_image, lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0], 30)
    except Exception as e:
        print(str(e))

    try:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255, 0, 0], 3)
            except Exception as e:
                print(str(e))
    except Exception as e:
        pass

    return processed_img, original_image, m1, m2


def straight():
    PressKey(W)
    ReleaseKey(A)
    ReleaseKey(D)


def left():
    PressKey(A)
    ReleaseKey(W)
    ReleaseKey(D)
    ReleaseKey(A)


def right():
    PressKey(D)
    ReleaseKey(A)
    ReleaseKey(W)
    ReleaseKey(D)


def slow_ya_roll():
    ReleaseKey(W)
    ReleaseKey(A)
    ReleaseKey(D)


# countdown before grabbing the screen
for i in list(range(4))[::-1]:
    print(i + 1)
    time.sleep(1)

last_time = time.time()
while True:
    screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
    print('Frame took {} seconds'.format(time.time() - last_time))
    last_time = time.time()
    new_screen, original_image, m1, m2 = process_img(screen)
    # cv2.imshow('window', new_screen)
    cv2.imshow('window2', cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))

    # steer based on the signs of the two lane slopes
    if m1 < 0 and m2 < 0:
        right()
    elif m1 > 0 and m2 > 0:
        left()
    else:
        straight()

    # cv2.imshow('window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
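The description mentions masking for the white lane color, but that step doesn't appear in the listing above. A minimal sketch of such a mask, assuming a BGR frame and a hypothetical 200-255 intensity band that would need tuning for real footage:

import cv2

def white_mask(frame_bgr, lower=200, upper=255):
    # Keep only near-white pixels; the 200-255 band is an assumed threshold.
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    mask = cv2.inRange(gray, lower, upper)
    # Zero out everything outside the mask.
    return cv2.bitwise_and(frame_bgr, frame_bgr, mask=mask)

The result could be fed to cv2.Canny in place of the raw frame so the Hough transform only sees the white lane markings.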

K Nearest Neighbors Classification

This is K Nearest Neighbors classification implemented without any machine learning libraries. For background on how the algorithm works, see the following video:


""" K Nearest Classification Code By Adam Blumenfeld @nexclap.com/AdamBlumenfeld """ # Import square root function from built-in library math from math import sqrt # Define Data data = [(1, 5, "red"), (10, 10, "blue"), (1, 4, "red"), (15, 10 , "blue"), (15, 11 , "blue"), (15, 12 , "blue"), (2, 6, "red"), (0, 8, "red"), (1, 1, "red"), (1, 5, "red"), (12, 12 , "blue"), (15, 13 , "blue"), (11, 12 , "blue"), (15, 19 , "blue"), (16, 18 , "blue")] # Euclidean Distance def euclid(x1, x2, y1, y2): return sqrt((x2-x1)**2+(y2-y1)**2) # K nearest neigbors engine def Knn(x, y, classes=[],k=3): # find three closest points to point that we want to classify closest = bubblesort(distance(x, y))[:k] # dict with count count = {} # add classes to count ### example: ### count = { ### "red":0, ### "blue":0 ### } for _class in classes: count[_class] = 0 # Add all occurences to count for point in closest: count[point[2]] += 1 # find max number of occurences of class m = [0, None] for _class in count: if count[_class] > m[0]: m[0] = count[_class] m[1] = _class # Done! return m[1] # Bubblesort algorithm (thanks google!) def bubblesort(arr): for i in range(len(arr)): for j in range(len(arr) - i - 1): if arr[j][3] > arr[j + 1][3]: temp = arr[j] arr[j] = arr[j + 1] arr[j + 1] = temp return arr # Finds distance between each each point and the point we want to classify # # For instance: (1, 5, "red") might be 3.1622776601683795 units away # So the new point will be made (1, 5, "red", 3.1622776601683795) # def distance(x, y): a = [] d = data # start recursive process return d_recured(x, y, a, d) # recursive function def d_recured(x, y, a, d): # BASE CASE: If the length of the data list is equal to 0, return the finished list with the distances if len(d) == 0: return a else: # current point current = d[0] # distance from point to point we want to classify dist = euclid(x, current[0], y, current[1]) # concatonate initial tuple to distance # example: (1, 5, "red") ----> (1, 5, "red", 3.1622776601683795) current = current + (dist,) # append new point to list a.append(current) # delete first value from list d.pop(0) return d_recured(x, y, a, d) print(Knn(0, 2, classes=["red", "blue"], k=3))

My K Nearest Neighbor Algorithm so far...without SKLearn...

Nowhere near done but I'll get there eventually...

# Importing
import numpy as np
import matplotlib.pyplot as plt
import math

# Data
data = [(1, 2, "red"), (10, 8, "blue"), (5, 6, "green"),
        (4, 1, "red"), (8, 8, "blue"), (3, 1, "green"),
        (1, 2, "red"), (8, 10, "blue"), (6, 5, "green")]

# Points
x = int(input("What's your x-value? "))
y = int(input("What's your y-value? "))
point = (x, y)
print(point)

# Distance
def distance(x, y, x2, y2):
    return math.sqrt((x2 - x) ** 2 + (y2 - y) ** 2)

# distance from each data point to the input point
for i in range(0, 9, 1):
    print(data[i], distance(x, y, data[i][0], data[i][1]))
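The remaining steps would be sorting the points by distance and taking a majority vote among the k nearest; a minimal sketch of that finish, assuming k = 3 and reusing the distance function above:

from collections import Counter

k = 3
# Pair each point with its distance to (x, y), sort, and keep the k nearest.
nearest = sorted(data, key=lambda p: distance(x, y, p[0], p[1]))[:k]
# Majority vote over the labels of the k nearest neighbors.
prediction = Counter(p[2] for p in nearest).most_common(1)[0][0]
print("Predicted class:", prediction)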

Support Vector Regression - Machine Learning Algorithm

Support Vector Machines have a regression variant (SVR) that fits a hyperplane through the data while trying to keep as many points as possible within a margin of tolerance around it. In this case, it was used to predict stock prices. This is not the best way of making stock predictions, but it could be useful in other cases where there is a strong linear or non-linear correlation in the data. The graph shows that the RBF (radial basis function) kernel fit this small sample of data best. To access the stock data, I used the Quandl API, which gives free access to certain stock datasets.

from sklearn.svm import SVR
import quandl
import matplotlib.pyplot as plt
import numpy as np

dates = []
quandl.ApiConfig.api_key = "zmEJw-5s6WZrEmH4pJ7U"
df = quandl.get("WIKI/GOOGL", start_date="2016-01-01", end_date="2016-01-21")
prices = df["Close"].values.T.tolist()

# use the day index (1, 2, 3, ...) as the feature
for i in range(1, len(prices) + 1):
    dates.append(i)
dates = np.reshape(dates, (len(dates), 1))
# print(dates)
print(prices)

def predict_prices(dates, prices, x):
    # defining the support vector regression models
    svr_lin = SVR(kernel='linear', C=1e3)
    svr_poly = SVR(kernel='poly', C=1e3, degree=2)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)

    # fitting the data points in the models
    svr_rbf.fit(dates, prices)
    svr_lin.fit(dates, prices)
    svr_poly.fit(dates, prices)
    print(prices)

    plt.scatter(dates, prices, color='black', label='Data')  # plotting the initial data points
    plt.plot(dates, svr_rbf.predict(dates), color='red', label='RBF model')        # line made by the RBF kernel
    plt.plot(dates, svr_lin.predict(dates), color='green', label='Linear model')   # line made by the linear kernel
    plt.plot(dates, svr_poly.predict(dates), color='blue', label='Polynomial model')  # line made by the polynomial kernel
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()

    # sklearn expects a 2D array for predict, so wrap the day index
    x = np.array([[x]])
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]

predicted_price = predict_prices(dates, prices, 24)
print(predicted_price)
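Part of why the RBF kernel tracks this data so closely is that it scores similarity by distance, k(x, x') = exp(-gamma * ||x - x'||^2), so nearby days influence each other strongly and far-apart days barely at all. A quick illustration of the kernel itself (not from the original post):

import numpy as np

def rbf_kernel(x1, x2, gamma=0.1):
    # Similarity near 1 for close inputs, near 0 for distant ones,
    # which is what lets SVR fit local curvature in the prices.
    return np.exp(-gamma * np.sum((np.asarray(x1) - np.asarray(x2)) ** 2))

print(rbf_kernel([1], [2]))   # neighboring days: about 0.905
print(rbf_kernel([1], [20]))  # far-apart days: about 2e-16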

Python Image Recognition

In this project, I used OpenCV for image recognition. OpenCV is a decent computer vision package with solid built-in functionality: blurring, straight-line detection, contouring. I find it less powerful, though, than training your own neural network using Keras or Google's TensorFlow library. In the future, I will likely look into the YOLO (You Only Look Once) approach or TensorFlow. One drawback of training your own network is that large amounts of data cannot be trained on in minutes; it can take many hours or even days, so I would look into setting up a GPU if you plan to use TensorFlow in your projects. https://github.com/akhily1/ImageRecognition
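As a taste of the OpenCV functionality mentioned above, here is a minimal sketch of blurring, straight-line detection, and contouring on one image (the file name image.jpg is a placeholder):

import cv2
import numpy as np

img = cv2.imread("image.jpg")                # placeholder path
blurred = cv2.GaussianBlur(img, (5, 5), 0)   # blurring
edges = cv2.Canny(blurred, 100, 200)         # edge map for the steps below
# straight-line detection via the probabilistic Hough transform
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                        minLineLength=50, maxLineGap=10)
# contouring (OpenCV 4.x returns (contours, hierarchy))
contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
print(len(contours), "contours found")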


The World Of Stocks



import sys
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QDialog, QMainWindow
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
import quandl
import math
import dash
import dash_core_components as dcc
import dash_html_components as html
import threading
import numpy as np
import datetime
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing, model_selection, svm
from sklearn.svm import SVR


class MainWindow(QMainWindow):
    # link the UI
    def __init__(self):
        super(MainWindow, self).__init__()
        loadUi('homepage.ui', self)
        quandl.ApiConfig.api_key = "Ui3JT8HDDV3vnADUsD2k"
        # fill the comboboxes
        self.fillCombobox()
        # take the text from the combobox and compute the average
        self.pushButton_2.clicked.connect(self.averageStock)
        # graph a stock
        self.pushButton_2.clicked.connect(self.visual)
        self.pushButton_3.clicked.connect(self.visual)
        self.pushButton_4.clicked.connect(self.visual)
        self.pushButton_5.clicked.connect(self.visual)
        # calculate the correlation coefficient
        self.pushButton_5.clicked.connect(self.calcCoCo)
        # graph correlation data
        self.pushButton.clicked.connect(self.graph)
        self.pushButton_4.clicked.connect(self.visual2)
        self.pushButton_3.clicked.connect(self.graph2)
        self.pushButton_4.clicked.connect(self.graph3)
        # graph related data
        self.pushButton_5.clicked.connect(self.visual3)
        self.pushButton_7.clicked.connect(self.futureDate)
        self.pushButton_8.clicked.connect(self.futureStocks)
        self.pushButton_9.clicked.connect(self.volatility)

    # function for filling the comboboxes
    def fillCombobox(self):
        companies = ['WIKI/AAPL', 'WIKI/GOOGL', 'WIKI/TSLA', 'WIKI/IBM',
                     'WIKI/WMT', 'WIKI/T', 'WIKI/AMZN', 'WIKI/INTC',
                     'WIKI/NKE', 'WIKI/EA']
        # fill the stock comboboxes
        for company in companies:
            self.comboBox.addItem(company)
            self.comboBox_2.addItem(company)
            self.comboBox_3.addItem(company)
            self.comboBox_4.addItem(company)
            self.comboBox_22.addItem(company)
            self.comboBox_19.addItem(company)
            self.comboBox_24.addItem(company)
        # fill the year comboboxes
        for i in range(2007, 2019):
            self.comboBox_6.addItem(str(i))
            self.comboBox_7.addItem(str(i))
            self.comboBox_8.addItem(str(i))
            self.comboBox_25.addItem(str(i))
            self.comboBox_26.addItem(str(i))
            self.comboBox_9.addItem(str(i))
            self.comboBox_11.addItem(str(i))
            self.comboBox_12.addItem(str(i))
            self.comboBox_18.addItem(str(i))
            self.comboBox_14.addItem(str(i))
        for i in range(2019, 2030):
            self.comboBox_23.addItem(str(i))
        # month comboboxes, zero-padded
        for i in range(13):
            self.comboBox_21.addItem(str(i).zfill(2))
            self.comboBox_17.addItem(str(i).zfill(2))
        # day comboboxes, zero-padded
        for i in range(32):
            self.comboBox_15.addItem(str(i).zfill(2))
            self.comboBox_20.addItem(str(i).zfill(2))
        # investment amounts
        for i in range(10000, 100000, 10000):
            self.comboBox_16.addItem(str(i))

    # average yearly high of a stock over a range of years
    def averageStock(self):
        company = self.comboBox.currentText()
        year_start = int(self.comboBox_8.currentText())
        year_end = int(self.comboBox_9.currentText())
        listC = []
        totalC = 0
        for i in range(year_start, year_end + 1):
            df = quandl.get(company, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-12-31")
            y = df["High"].mean()
            listC.append(y)
            totalC += y
        avgC = totalC / len(listC)
        self.textEdit_27.setText(str(avgC))

    # graph a stock (run the Dash server on a background thread)
    def visual(self):
        threading.Thread(target=self.graph, daemon=True).start()

    def graph(self):
        # make the list for the y values
        company = self.comboBox_4.currentText()
        year_start = int(self.comboBox_7.currentText())
        year_end = int(self.comboBox_6.currentText())
        listC = []
        for i in range(year_start, year_end + 1):
            print(i)
            df = quandl.get(company, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-12-31")
            listC.append(df["High"].mean())
        # make the list for the x values
        years = list(range(year_start, year_end + 1))
        # make the graph
        app = dash.Dash()
        app.layout = html.Div(children=[
            html.H1(children=str(company) + ' Graph'),
            html.Div(children='Graph of ' + str(company) + ' In The Years '
                              + str(year_start) + ' through ' + str(year_end)),
            dcc.Graph(
                id='ui-graphicvisualization',
                figure={
                    'data': [{'x': years, 'y': listC, 'type': 'line', 'name': company}],
                    'layout': {'title': str(company)}
                }
            )
        ])
        app.run_server(port=1111)

    # value of an investment between two dates
    def futureDate(self):
        sday = self.comboBox_15.currentText()
        smonth = self.comboBox_21.currentText()
        syear = self.comboBox_18.currentText()
        eday = self.comboBox_20.currentText()
        emonth = self.comboBox_17.currentText()
        eyear = self.comboBox_14.currentText()
        valueInvested = self.comboBox_16.currentText()
        inputStock = self.comboBox_22.currentText()
        start = syear + "-" + smonth + "-" + sday
        end = eyear + "-" + emonth + "-" + eday
        startDay = quandl.get(inputStock, start_date=start, end_date=start)
        endDay = quandl.get(inputStock, start_date=end, end_date=end)
        x = startDay["High"].mean()
        y = endDay["High"].mean()
        percentChange = (y - x) / x
        finalAmt = int(valueInvested) * (1 + percentChange)
        self.textEdit_28.setText(str(finalAmt))

    # predict a future price with SVR trained on yearly averages
    def futureStocks(self):
        company = self.comboBox_19.currentText()
        years = []
        prices = []
        # make a list of years from 2007 to 2018 and an average for each year
        for i in range(2007, 2019):
            years.append(i)
            df = quandl.get(company, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-01-10")
            prices.append(df["High"].mean())
        print(prices)
        # third parameter of predict_stock_prices: days past 2018
        futureYear = int(self.comboBox_23.currentText())
        x = (futureYear - 2018) * 365
        print(x)

        def predict_stock_prices(years, prices, x):
            years = np.reshape(years, (len(years), 1))
            svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
            svr_rbf.fit(years, prices)  # sklearn expects a 1D target here
            return svr_rbf.predict(x)[0]

        predicted_prices = predict_stock_prices(years, prices, [[x]])
        self.textEdit_32.setText(str(predicted_prices))

    # standard deviation of the yearly averages as a volatility measure
    def volatility(self):
        inputStock = self.comboBox_24.currentText()
        year_start = int(self.comboBox_25.currentText())
        year_end = int(self.comboBox_26.currentText())
        listC = []
        totalC = 0
        for i in range(year_start, year_end + 1):
            df = quandl.get(inputStock, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-12-31")
            y = df["High"].mean()
            listC.append(y)
            totalC += y
        avgC = totalC / len(listC)
        # squared deviations from the mean
        diffs = []
        for i in listC:
            diffs.append((avgC - i) ** 2)
        total = 0
        for i in diffs:
            total += i
        volatility = float((total / len(listC)) ** 0.5)
        self.textEdit_35.setText(str(volatility))

    # correlation coefficient between two stocks
    def calcCoCo(self):
        inputCompany = self.comboBox_2.currentText()
        company = self.comboBox_3.currentText()
        year_start = int(self.comboBox_11.currentText())
        year_end = int(self.comboBox_12.currentText())
        listInput = []
        totalInput = 0
        listComp = []
        totalComp = 0
        for i in range(year_start, year_end + 1):
            df = quandl.get(inputCompany, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-12-31")
            df2 = quandl.get(company, start_date=str(i) + "-01-01",
                             end_date=str(i) + "-12-31")
            y = df["High"].mean()
            x = df2["High"].mean()
            listInput.append(y)
            totalInput += y
            listComp.append(x)
            totalComp += x
        avgInput = totalInput / len(listInput)
        avgComp = totalComp / len(listComp)
        # to find the variances, you need both lists squared
        listInputSquared = [i ** 2 for i in listInput]
        listCompSquared = [i ** 2 for i in listComp]
        # elementwise products of the two lists
        mult = [listInput[i] * listComp[i] for i in range(len(listInput))]
        # averages of the squared lists and the product list
        avgSquareInput = sum(listInputSquared) / len(listInputSquared)
        avgSquareComp = sum(listCompSquared) / len(listCompSquared)
        avgMult = sum(mult) / len(mult)
        # Var[X] = E[X^2] - E[X]^2 and Cov[X, Y] = E[XY] - E[X]E[Y]
        varInput = avgSquareInput - avgInput ** 2
        varComp = avgSquareComp - avgComp ** 2
        coVar = avgMult - avgInput * avgComp
        CoCo = coVar / math.sqrt(varInput * varComp)
        self.textEdit_26.setText(str(CoCo))

    # graph the correlation between two stocks
    def visual2(self):
        threading.Thread(target=self.graph2, daemon=True).start()

    def graph2(self):
        inputCompany = self.comboBox_2.currentText()
        company = self.comboBox_3.currentText()
        year_start = int(self.comboBox_11.currentText())
        year_end = int(self.comboBox_12.currentText())
        listInput = []
        listComp = []
        for i in range(year_start, year_end + 1):
            df = quandl.get(inputCompany, start_date=str(i) + "-01-01",
                            end_date=str(i) + "-12-31")
            df2 = quandl.get(company, start_date=str(i) + "-01-01",
                             end_date=str(i) + "-12-31")
            listInput.append(df["High"].mean())
            listComp.append(df2["High"].mean())
        # same covariance-based correlation as calcCoCo
        avgInput = sum(listInput) / len(listInput)
        avgComp = sum(listComp) / len(listComp)
        avgSquareInput = sum(i ** 2 for i in listInput) / len(listInput)
        avgSquareComp = sum(i ** 2 for i in listComp) / len(listComp)
        avgMult = sum(listInput[i] * listComp[i] for i in range(len(listInput))) / len(listInput)
        varInput = avgSquareInput - avgInput ** 2
        varComp = avgSquareComp - avgComp ** 2
        coVar = avgMult - avgInput * avgComp
        CoCo = coVar / math.sqrt(varInput * varComp)
        # make the x list consisting of the years
        years = list(range(year_start, year_end + 1))
        app = dash.Dash()
        app.layout = html.Div(children=[
            html.H1(children='Correlation Coefficients'),
            html.Div(children='Graphs the two companies against each other'),
            dcc.Graph(
                id='example-graph-5',
                figure={
                    'data': [
                        {'x': years, 'y': listInput, 'type': 'line', 'name': inputCompany},
                        {'x': years, 'y': listComp, 'type': 'line', 'name': company},
                    ],
                    'layout': {'title': 'Correlation= ' + str(CoCo)}
                }
            ),
        ])
        app.run_server(port=2222)

    # graph the correlation between the first stock and a list of other stocks
    def visual3(self):
        threading.Thread(target=self.graph3, daemon=True).start()

    def graph3(self):
        inputCompany = self.comboBox_4.currentText()
        year_start = int(self.comboBox_7.currentText())
        year_end = int(self.comboBox_6.currentText())
        # list of companies to loop through
        companies = ['WIKI/AAPL', 'WIKI/GOOGL', 'WIKI/TSLA', 'WIKI/IBM',
                     'WIKI/WMT', 'WIKI/T', 'WIKI/AMZN', 'WIKI/INTC',
                     'WIKI/NKE', 'WIKI/EA']
        # dictionaries mapping each company to its correlation coefficient
        stockList = []
        absstock = []
        absstockDiction = {}
        randstockDiction = {}
        avgValuesDiction = {}
        # calculate the correlation for each company and record the values
        for company in companies:
            listInput = []
            listComp = []
            for i in range(year_start, year_end + 1):
                df = quandl.get(inputCompany, start_date=str(i) + "-01-01",
                                end_date=str(i) + "-12-31")
                df2 = quandl.get(company, start_date=str(i) + "-01-01",
                                 end_date=str(i) + "-12-31")
                listInput.append(df["High"].mean())
                listComp.append(df2["High"].mean())
            avgInput = sum(listInput) / len(listInput)
            avgComp = sum(listComp) / len(listComp)
            avgSquareInput = sum(i ** 2 for i in listInput) / len(listInput)
            avgSquareComp = sum(i ** 2 for i in listComp) / len(listComp)
            avgMult = sum(listInput[i] * listComp[i] for i in range(len(listInput))) / len(listInput)
            varInput = avgSquareInput - avgInput ** 2
            varComp = avgSquareComp - avgComp ** 2
            coVar = avgMult - avgInput * avgComp
            CoCo = coVar / math.sqrt(varInput * varComp)
            # append the values to the dictionaries/lists
            randstockDiction[company] = CoCo
            absstockDiction[abs(CoCo)] = company
            absstock.append(abs(CoCo))
            avgValuesDiction[inputCompany] = listInput
            avgValuesDiction[company] = listComp
        # final list sorted by absolute correlation, strongest first
        absstock = sorted(absstock)
        for i in range(9, -1, -1):
            comp = absstockDiction[absstock[i]]
            stockList.append([comp, randstockDiction[comp]])
        # graph the related data: one graph per company
        years = list(range(year_start, year_end + 1))
        app = dash.Dash()
        graphs = [
            dcc.Graph(
                id='example-graph-' + str(6 + n),
                figure={
                    'data': [
                        {'x': years, 'y': avgValuesDiction[inputCompany],
                         'type': 'line', 'name': inputCompany},
                        {'x': years, 'y': avgValuesDiction[stockList[n][0]],
                         'type': 'line', 'name': stockList[n][0]},
                    ],
                    'layout': {'title': str(stockList[n][0])}
                }
            )
            for n in range(10)
        ]
        app.layout = html.Div(children=[
            html.H1(children='Correlation Coefficients: Related Data'),
            html.Div(children='Graphs each company with the input company and '
                              'finds the correlation coefficient.'),
        ] + graphs)
        app.run_server(port=3333)


app = QApplication(sys.argv)
widget = MainWindow()
widget.show()
sys.exit(app.exec_())
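The hand-rolled statistics above implement Var[X] = E[X^2] - E[X]^2, Cov[X, Y] = E[XY] - E[X]E[Y], and the Pearson correlation coefficient Cov[X, Y] / (std(X) * std(Y)). A quick way to sanity-check them against numpy's built-ins (illustrative only, with hypothetical yearly averages):

import numpy as np

listInput = [10.2, 12.5, 15.1, 14.8]  # hypothetical yearly averages
listComp = [20.1, 24.9, 29.8, 30.2]

# The off-diagonal entry of the 2x2 matrix is the Pearson coefficient,
# which should match the app's CoCo value for the same series.
print(np.corrcoef(listInput, listComp)[0][1])

# Population standard deviation, matching the app's volatility formula.
print(np.std(listInput))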

Sports Clothing Image Recognition

A project I worked on early last year involved taking an image of sports clothing and matching it to the actual product on Amazon. It would be cool to see someone add features so the program could retrieve the product from other clothing sites as well and filter for the cheapest option. The video shows the current status of the project, and the GitHub link is here: https://github.com/akhily1/Sports-Apparel-Matching
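The post doesn't include code, but the core matching step presumably compares visual features between a query photo and product images. A generic sketch of one way to score similarity with OpenCV; this is purely illustrative, the file names are placeholders, and it is not necessarily the project's actual method:

import cv2

def similarity(path_a, path_b):
    # ORB keypoints with brute-force Hamming matching; a higher count of
    # cross-checked matches suggests more visually similar products.
    orb = cv2.ORB_create()
    img_a = cv2.imread(path_a, cv2.IMREAD_GRAYSCALE)
    img_b = cv2.imread(path_b, cv2.IMREAD_GRAYSCALE)
    _, des_a = orb.detectAndCompute(img_a, None)
    _, des_b = orb.detectAndCompute(img_b, None)
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des_a, des_b)
    return len(matches)

print(similarity("query.jpg", "catalog_item.jpg"))  # placeholder paths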

