Skip to content

Commit 1963a89

Browse files
committed
testing ajax for async prediction and small changes in files
1 parent 9687e65 commit 1963a89

File tree

10 files changed

+77
-30
lines changed

10 files changed

+77
-30
lines changed

.gitignore

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
__pycache__/
22
data.sqlite
3-
info.sqlite3
3+
info.sqlite3
4+
test-101.txt
5+
test-150.txt

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,4 +29,4 @@ It might take few seconds to load please give it a try
2929
<li>app.py is the flask application file</li>
3030
<li>model_nltk.py contains the source code for the Naive Bayes classifier which has been used in production</li>
3131
<li>model_keras.py is another source file for a model using Keras, but we haven't used this in production because of its accuracy.</li>
32-
</ul>
32+
</ul>

app.py

Lines changed: 34 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
11
# importing libraries for flask, database, model
2-
from flask import Flask, render_template, request, jsonify, redirect, url_for
3-
from flask_sqlalchemy import SQLAlchemy
4-
from model_nltk import predict_sentiment
2+
import os
53
from datetime import datetime
6-
from textblob import TextBlob
74
from pickle import load
5+
86
import pytz
9-
import os
7+
from flask import Flask, jsonify, redirect, render_template, request, url_for
8+
from flask_sqlalchemy import SQLAlchemy
9+
from textblob import TextBlob
10+
11+
from model_nltk import predict_sentiment
1012

1113
app = Flask(__name__, template_folder='templates')
1214

@@ -20,6 +22,7 @@
2022

2123
db = SQLAlchemy(app)
2224

25+
# since the app is hosted on Heroku, this line changes the timezone to IST
2326
IST = pytz.timezone('Asia/Kolkata')
2427

2528

@@ -32,6 +35,7 @@ class New_Data(db.Model):
3235
Id = db.Column(db.Integer, primary_key=True)
3336
Text = db.Column(db.Text)
3437
Sentiment = db.Column(db.String(20))
38+
# .now(IST).strftime('%Y-%m-%d %H:%M:%S'))
3539
Date = db.Column(db.DateTime, default=datetime.now(IST).strftime('%Y-%m-%d %H:%M:%S'))
3640

3741
def __init__(self, Text, Sentiment):
@@ -45,6 +49,7 @@ def __init__(self, Text, Sentiment):
4549

4650

4751
def allowed_file(filename):
52+
'''Checking file extension i.e. text file or not'''
4853
return '.' in filename and filename.split('.')[1] == 'txt'
4954

5055

@@ -66,6 +71,7 @@ def home():
6671
else:
6772
pass
6873

74+
# creating an instance of the data table for the database and committing the changes
6975
usr_data = New_Data(sentence, sentiment.split()[0])
7076
db.session.add(usr_data)
7177
db.session.commit()
@@ -89,21 +95,31 @@ def contact():
8995

9096

9197
# route for fastapi
92-
@app.route('/fast-api/', defaults={'sentence' : 'Great'})
98+
# setting default value for the api
99+
@app.route('/fast-api/', defaults={'sentence': 'Great'})
93100
@app.route('/fast-api/<sentence>')
94101
def fast_api(sentence):
95102
sentiment = predict_sentiment(sentence, classifier)
96103

97104
return jsonify({'sentence': sentence, 'sentiment': sentiment})
98105

99106

107+
# setting post method for the api
108+
@app.route('/fastapi', methods=['POST'])
109+
def fastapi():
110+
text = request.form['text']
111+
return jsonify({'sentiment' : 'Positive' if TextBlob(text).sentiment.polarity > 0 else 'Negative'})
112+
113+
100114
# route for uploading and saving a temporary file
101115
@app.route('/upload')
102116
def upload():
103117
mssg = request.args.get('msg')
118+
# if the uploaded file is not a text file
104119
if mssg == "ntxt":
105120
mssg = "Kindly Upload a text file"
106121

122+
# if the uploaded textfile is not readable
107123
elif mssg == "incrt":
108124
mssg = "Upload file of correct format"
109125

@@ -122,18 +138,21 @@ def canvas():
122138
subject = []
123139
polar = []
124140
file = request.files['file']
125-
141+
142+
# if the file is correct and readable then save it
126143
if allowed_file(file.filename):
127144
file.save(file.filename)
128145

129146
try:
147+
# open the file, read the content, perform the analysis, and then return the template with the values
130148
with open(file.filename) as fl:
131149
content = fl.read().split('\n')
132-
for t in content:
150+
for line in content:
133151
# t = fl.readline()
134-
a = TextBlob(t).sentiment.polarity*100
135-
subject.append(TextBlob(t).sentiment.subjectivity*100)
152+
a = TextBlob(line).sentiment.polarity*100
136153
polar.append(a)
154+
subject.append(
155+
TextBlob(line).sentiment.subjectivity*100)
137156
if a > 0:
138157
pos += 1
139158
else:
@@ -191,6 +210,11 @@ def show():
191210
return redirect(url_for('login', er="lnf"))
192211

193212

213+
@app.route('/test')
214+
def test():
215+
return render_template('index.html')
216+
217+
194218
@app.errorhandler(404)
195219
def error404(error):
196220
return render_template("error404.html"), 404

fastapi.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,4 @@
1919
neg += 1
2020
total = len(file.readlines)
2121

22-
print("Out of {total} reviews, {pos} are Positive and {neg} are Negative reviews")
22+
print(f"Out of {total} reviews, {pos} are Positive and {neg} are Negative reviews")

model_keras.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
1+
import matplotlib.pyplot as plt
2+
import numpy as np
3+
from keras.models import load_model
4+
from tensorflow.python.keras.callbacks import LambdaCallback
15
from tensorflow.python.keras.datasets import imdb
6+
from tensorflow.python.keras.layers import (Dense, Embedding,
7+
GlobalAveragePooling1D)
28
from tensorflow.python.keras.models import Sequential
39
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
4-
from tensorflow.python.keras.layers import Embedding, Dense, GlobalAveragePooling1D
5-
from tensorflow.python.keras.callbacks import LambdaCallback
6-
from keras.models import load_model
7-
import matplotlib.pyplot as plt
8-
import numpy as np
9-
1010

1111
# This model was used in an early stage of this project and kept as a reference; learned from a Coursera project
1212

@@ -111,14 +111,15 @@ def show_lengths():
111111

112112
decode(x_test[1])
113113

114+
# these are sample inputs for manual testing
114115
text = "i was working on that project and i find it quiet amazing and funny overall the experience was good and satisfying"
115-
text = "working on this was a worst experience for me i hate this very much and wish no one should get though this"
116-
text = "you are a waste"
116+
text1 = "working on this was a worst experience for me i hate this very much and wish no one should get though this"
117+
text2 = "you are a waste"
117118
t_list = []
118119

119120
for i in text.split():
120121
t_list.append(word_index[i])
121-
122+
122123
print(t_list)
123124

124125
prediction = md.predict(np.expand_dims(t_list, axis=0))

model_nltk.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
11
# importing libraries for performing the sentiment analysis, cleaning data, training and saving the model
2-
from nltk.tag import pos_tag
3-
from nltk.tokenize import word_tokenize
4-
from nltk.stem.wordnet import WordNetLemmatizer
5-
from nltk.corpus import twitter_samples, stopwords
6-
from nltk import FreqDist, classify, NaiveBayesClassifier
72
import pickle
3+
import random
84
import re
95
import string
10-
import random
6+
7+
from nltk import FreqDist, NaiveBayesClassifier, classify
8+
from nltk.corpus import stopwords, twitter_samples
9+
from nltk.stem.wordnet import WordNetLemmatizer
10+
from nltk.tag import pos_tag
11+
from nltk.tokenize import word_tokenize
1112

1213

1314
def remove_noise(tweet_tokens, stop_words=()):
@@ -35,6 +36,7 @@ def remove_noise(tweet_tokens, stop_words=()):
3536

3637

3738
def get_all_words(cleaned_tokens_list):
39+
'''It acts as a generator for the tokens'''
3840
for tokens in cleaned_tokens_list:
3941
for token in tokens:
4042
yield token
@@ -62,11 +64,13 @@ def save_model():
6264

6365
if __name__ == "__main__":
6466

67+
# loading the dataset for model training
6568
positive_tweets = twitter_samples.strings('positive_tweets.json')
6669
negative_tweets = twitter_samples.strings('negative_tweets.json')
6770
text = twitter_samples.strings('tweets.20150430-223406.json')
6871
tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]
6972

73+
# saving the stopwords from the nltk into a variable
7074
stop_words = stopwords.words('english')
7175

7276
positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')

nltk.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
punkt
22
stopwords
33
wordnet
4-
averaged_perceptron_tagger
4+
averaged_perceptron_tagger

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,4 @@ gunicorn==20.0.4
44
Flask-SQLAlchemy==2.4.4
55
psycopg2==2.8.6
66
textblob==0.15.3
7-
pytz==2020.1
7+
pytz==2020.1

static/js/app.js

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
// this is the same as script.js, but it uses a different approach (jQuery AJAX POST)
2+
$(document).ready(function () {
3+
4+
$("#text").keyup(function () {
5+
6+
var text = $("#text").val();
7+
$.post('/fastapi', {
8+
text : text
9+
}, function (data, status) {
10+
$("#ans").html(data.sentiment);
11+
});
12+
13+
});
14+
15+
});

templates/index.html

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ <h1>Sentiment&nbspAnalysis</h1>
3434
<h3>{{text}}</h3>
3535
<br>
3636
<h3>{{sentiment}}</h3>
37+
<h3 id="ans"></h3>
3738
<br>
3839
<div class="main">
3940
<p>Note:</p>

0 commit comments

Comments
 (0)