-
Notifications
You must be signed in to change notification settings - Fork 14
/
app.py
148 lines (117 loc) · 3.97 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_compress import Compress
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy import func
from sqlalchemy.pool import Pool
from sqlalchemy.pool import NullPool
from util import safe_commit
from util import elapsed
from util import HTTPMethodOverrideMiddleware
from util import read_csv_file
import logging
import sys
import os
import requests
import redis
import time
import json
from collections import defaultdict
from rq import Queue
# Route all logging to stdout at DEBUG so platform log collectors pick it up.
# see http://wiki.pylonshq.com/display/pylonscookbook/Alternative+logging+configuration
logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format='%(name)s - %(message)s'
)
logger = logging.getLogger("software")

# Third-party libraries that are too chatty at DEBUG; raise each of their
# loggers to WARNING so our own DEBUG output stays readable.
libraries_to_mum = [
    "requests.packages.urllib3",
    "requests_oauthlib",
    "stripe",
    "oauthlib",
    "boto",
    "newrelic",
    "RateLimiter",
    "urllib3",
    "paramiko",
    "chardet"
]
for noisy_name in libraries_to_mum:
    noisy_logger = logging.getLogger(noisy_name)
    noisy_logger.setLevel(logging.WARNING)
    noisy_logger.propagate = True

# Silence urllib3's TLS/insecure-request warnings as well.
requests.packages.urllib3.disable_warnings()
app = Flask(__name__)
# app.debug = True

# database configuration, all read from the environment
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True  # as instructed, to suppress warning
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
# default is the string "False" so the comparison is type-consistent
# (previously the default was the bool False, which never equals "True"
# either, but mixed str/bool defaults are an easy source of bugs)
app.config['SQLALCHEMY_ECHO'] = (os.getenv("SQLALCHEMY_ECHO", "False") == "True")
# from http://stackoverflow.com/a/12417346/596939
class NullPoolSQLAlchemy(SQLAlchemy):
    """Flask-SQLAlchemy subclass that forces SQLAlchemy's NullPool.

    NullPool opens a fresh database connection per checkout instead of
    reusing pooled ones, avoiding stale-connection errors at the cost of
    a connect per use.
    """

    def apply_driver_hacks(self, app, info, options):
        # NOTE(review): apply_driver_hacks is a Flask-SQLAlchemy hook that
        # was removed in Flask-SQLAlchemy 3.x — confirm the pinned version
        # still calls it, otherwise this override silently does nothing.
        options['poolclass'] = NullPool
        return super(NullPoolSQLAlchemy, self).apply_driver_hacks(app, info, options)


# module-level handle used by the rest of the app for all DB access
db = NullPoolSQLAlchemy(app)
# do compression. has to be above flask debug toolbar so it can override this.
compress_json = os.getenv("COMPRESS_DEBUG", "False") == "True"

# set up Flask-DebugToolbar, opt-in via FLASK_DEBUG=True in the environment.
# The default is now the string "False", consistent with COMPRESS_DEBUG above
# (the old bool default False also never equalled "True", so behavior is
# unchanged — this just removes the mixed str/bool comparison).
if os.getenv("FLASK_DEBUG", "False") == "True":
    logger.info("Setting app.debug=True; Flask-DebugToolbar will display")
    compress_json = False  # presumably so the toolbar sees uncompressed responses — see comment above
    app.debug = True
    app.config['DEBUG'] = True
    app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False
    app.config["SQLALCHEMY_RECORD_QUERIES"] = True
    app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
    toolbar = DebugToolbarExtension(app)

# gzip responses
Compress(app)
app.config["COMPRESS_DEBUG"] = compress_json
# for running rq jobs
# One shared redis connection, defaulting to a local instance for development.
redis_rq_conn = redis.from_url(
    os.getenv("REDIS_URL", "redis://127.0.0.1:6379"),
    db=0
)
# Named work queues ("ti-queue-0", "ti-queue-1") sharing that connection;
# built with a comprehension instead of an append loop.
ti_queues = [
    Queue("ti-queue-{}".format(i), connection=redis_rq_conn)
    for i in range(2)  # number of queues to spin up
]
# imports go here for tables that need to be auto-created.
# from models import temp_orcid_profile
# from models import temp_product
#
# from models import person
# from models import product
# from models import badge
# from models import refset
# from models import email
# from models import log_temp_profile
# from models import log_openness
#
# db.create_all()
# commit_success = safe_commit(db)
# if not commit_success:
# print u"COMMIT fail making objects"
# from http://docs.sqlalchemy.org/en/latest/core/pooling.html
# This recipe will ensure that a new Connection will succeed even if connections in the pool
# have gone stale, provided that the database server is actually running.
# The expense is that of an additional execution performed per checkout
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    """Pessimistically test a pooled connection on every checkout.

    Issues a cheap "SELECT 1" on the raw DBAPI connection; if it fails,
    raises DisconnectionError so the pool discards the stale connection
    and retries with a fresh one (up to three times before giving up).
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; Exception is the narrowest safe net for arbitrary
        # DBAPI driver errors here.
        # optional - dispose the whole pool
        # instead of invalidating one at a time
        # connection_proxy._pool.dispose()
        # raise DisconnectionError - pool will try
        # connecting again up to three times before raising.
        raise exc.DisconnectionError()
    finally:
        # Previously only closed on success, leaking the cursor whenever
        # the ping failed; finally closes it on both paths.
        cursor.close()