import pandas as pd
import numpy as np
import json
import time
from flask import Flask

#from werkzeug.middleware.proxy_fix import ProxyFix
# -------------------------------------------------------------------------------
app = Flask(__name__)
# Logged at ERROR level, presumably so the startup marker shows up even when the
# server's log level is set higher than INFO.
app.logger.error('Start')
# Per-process, in-memory response caches, keyed by the request parameters.
_CACHE_GROUPBY = dict()
_CACHE_RKI = dict()
_CACHE_VALUES = dict()
_CACHE_RKILINE = dict()
# -------------------------------------------------------------------------------
def loaddata():
    # Load the RKI case-level CSV snapshot (the previous snapshot is kept above
    # for reference) and parse the two date columns.
    #df=pd.read_csv("https://radamanthys.de/datasets/RKI/rki-20210308.csv", delimiter=",")
    df = pd.read_csv("https://radamanthys.de/datasets/RKI/rki-20210324.csv", delimiter=",")
    df['Meldedatum'] = pd.to_datetime(df.Meldedatum)
    df['Refdatum'] = pd.to_datetime(df.Refdatum)
    # RKI convention: a NeuerFall/NeuerTodesfall/NeuGenesen flag of 0 or 1 counts
    # toward the current total; -1 marks records removed from the current publication.
    df['SummeFall'] = np.where(df.NeuerFall.isin([0, 1]), df.AnzahlFall, 0)
    df['SummeTod'] = np.where(df.NeuerTodesfall.isin([0, 1]), df.AnzahlTodesfall, 0)
    df['SummeGenesen'] = np.where(df.NeuGenesen.isin([0, 1]), df.AnzahlGenesen, 0)

    # Join population figures by municipality key and derive per-100k rates.
    gro = pd.read_csv("https://radamanthys.de/datasets/DIVI/gemeindegroesse.csv")
    c = pd.merge(df, gro, how='left', left_on="IdLandkreis", right_on="gemeindeschluessel")
    c['FallPer100k'] = c.SummeFall * 100000 / c.einwohner
    c['TodPer100k'] = c.SummeTod * 100000 / c.einwohner
    c['GenesenPer100k'] = c.SummeGenesen * 100000 / c.einwohner

    return c
s = time.time()
df = loaddata()
app.logger.error('load time %s', (time.time() - s))
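
# Note: loaddata() runs at import time, so every process that imports this module
# downloads and prepares the full dataset once on startup; the _CACHE_* dicts
# above are likewise per process and are never invalidated while running.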
# -------------------------------------------------------------------------------
@app.route('/')
def hello_world():
    return 'Hello, World!'
# -------------------------------------------------------------------------------
# Static option lists, presumably backing the selectors of a front end.
@app.route('/lists/dates')
def get_dates():
    return json.dumps(["Refdatum", "Meldedatum"])

# -------------------------------------------------------------------------------
@app.route('/lists/groups')
def get_groups():
    return json.dumps([None, "Geschlecht", "Altersgruppe", "Bundesland", "Landkreis"])

# -------------------------------------------------------------------------------
@app.route('/lists/columns')
def get_columns():
    return json.dumps(["Bundesland", "Landkreis", "Altersgruppe", "Geschlecht"])
# -------------------------------------------------------------------------------
@app.route('/lists/values/<column>')
def get_values(column):
    # Sorted distinct values of a column, cached per column; unknown columns
    # yield an empty list instead of an error.
    r = _CACHE_VALUES.get(column)
    if not r:
        if column in df.columns:
            d = sorted(list(df[column].unique()))
        else:
            d = list()
        _CACHE_VALUES[column] = r = json.dumps(d)
    return r
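
# Example (illustrative): GET /lists/values/Bundesland returns the sorted
# distinct entries of that column, e.g. ["Baden-Württemberg", "Bayern", ...].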
# -------------------------------------------------------------------------------
@app.route('/lists/attributes')
def get_attributes():
    return json.dumps(["SummeFall", "SummeTod", "SummeGenesen"])
# -------------------------------------------------------------------------------
@app.route('/rkiline/<datetype>/<column>/<value>')
def rkiline(datetype, column, value):
    r = _CACHE_RKILINE.get((datetype, column, value))
    if not r:
        #if column in ['Bundesland','Landkreis'] and value.find("Berlin")<0:
        #    bl = df[df[column]==value].groupby([datetype]).sum() [["FallPer100k","TodPer100k"]]
        #    d["title"] = { "text": f"{datetype} {column} {value} pro 100T Einwohner" }
        #else:
        #    bl = df[df[column]==value].groupby([datetype]).sum() [["SummeFall","SummeTod"]]
        #    d["title"] = { "text": f"{datetype} {column} {value}" }

        # Chart options: two y-axes, one per series, with matching colors.
        opt = dict()
        opt["colors"] = ["#FF1654", "#247BA0"]
        opt["stroke"] = {"width": [1, 1]}
        opt["yaxis"] = [
            {
                "title": { "text": "Fälle", "style": { "color": "#FF1654" } },
                "labels": { "style": { "colors": "#FF1654" } },
                "axisBorder": { "show": True, "color": "#FF1654" }
            },
            {
                "opposite": True,
                "title": { "text": "Todesfälle", "style": { "color": "#247BA0" } },
                "labels": { "style": { "colors": "#247BA0" } },
                "axisBorder": { "show": True, "color": "#247BA0" }
            }
        ]
        opt["dataLabels"] = {"enabled": False}

        # Daily sums for the selected slice, smoothed with a 7-day rolling mean.
        bl = df[df[column] == value].groupby([datetype]).sum()[["SummeFall", "SummeTod"]]
        opt["title"] = { "text": f"{datetype} {column} {value}" }
        bl = bl.rolling(7).mean()
        opt["xaxis"] = dict()
        opt["xaxis"]["categories"] = [i.isoformat() for i in bl.index]

        d = dict()
        d["options"] = opt
        d["series"] = []

        for col in bl.columns:
            l = list()
            for i in bl.index:
                try:
                    val = int(bl.loc[i, col])
                except (KeyError, ValueError):
                    # Missing rows and the NaNs produced by rolling() become 0.
                    val = 0
                l.append(val)
            d["series"].append( {"name": col, "data": l} )

        _CACHE_RKILINE[(datetype, column, value)] = r = json.dumps(d)
    return r
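
# Shape of the cached payload (illustrative, e.g. for
# GET /rkiline/Meldedatum/Bundesland/Bayern):
#
#   {
#     "options": { "colors": [...], "stroke": {...}, "yaxis": [...],
#                  "dataLabels": {...}, "title": {...},
#                  "xaxis": { "categories": ["2020-03-01T00:00:00", ...] } },
#     "series": [ { "name": "SummeFall", "data": [ ... ] },
#                 { "name": "SummeTod",  "data": [ ... ] } ]
#   }
#
# The options block follows the ApexCharts configuration layout, so the response
# can presumably be handed straight to an ApexCharts instance on the client.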
# -------------------------------------------------------------------------------
#@app.route('/rkiline2')
#def rkiline2():
#    bl = df.groupby(["Refdatum"]).sum() [["SummeFall","SummeTod"]]
#    bl = bl.rolling(7).mean()
#
#    d = dict()
#    d["chart"] = { "type": "bar", "height": "350px" }
#    d["title"] = { "text": f"uuuu" }
#    d["series"] = []
#    d["xaxis"] = list(np.array([i.isoformat() for i in bl.index]))
#    #d["xaxis"] = list(np.array([i.strftime("%d.%m.%Y") for i in bl.index]))
#
#    for col in bl.columns:
#        l = list()
#        for i in bl.index:
#            try:
#                val = int(bl.loc[i, col])
#            except KeyError:
#                val = 0
#            except ValueError:
#                val = 0
#            l.append(val)
#            #l.append( {'x': int(i.timestamp()), 'y': val})
#
#        d["series"].append( {"name": col, "data": l} )
#
#    return json.dumps(d, default=str)
# -------------------------------------------------------------------------------
@app.route('/rkibar/<attribute>/<groupby1>/<groupby2>')
def rkibar(attribute, groupby1, groupby2):
    r = _CACHE_RKI.get((attribute, groupby1, groupby2))
    if not r:
        if groupby2 == 'null':
            # Single-level grouping: one bar series over the groupby1 categories.
            ge = df.groupby([groupby1]).sum()[[attribute]]
            d = dict()
            d["chart"] = { "type": "bar", "height": "350px" }
            d["title"] = { "text": f"{attribute} gruppiert nach {groupby1}" }
            d["series"] = []
            d["xaxis"] = list(ge.index)

            l = list()
            for i in ge.index:
                try:
                    val = int(ge.loc[i, attribute])
                except (KeyError, ValueError):
                    val = 0
                l.append(val)

            d["series"].append( {"name": attribute, "data": l} )
        else:
            # Two-level grouping: one series per groupby2 value, x-axis from groupby1.
            ge = df.groupby([groupby1, groupby2]).sum()[[attribute]]

            d = dict()
            d["chart"] = { "type": "bar", "height": "350px" }
            d["title"] = { "text": f"{attribute} gruppiert nach {groupby1} und {groupby2}" }
            d["series"] = []

            level1 = list(ge.index.get_level_values(0).unique())
            level2 = list(ge.index.get_level_values(1).unique())

            tmp = dict()
            for index2 in level2:
                tmp[index2] = []
                for index1 in level1:
                    try:
                        # Not every (groupby1, groupby2) pair exists in the index.
                        val = int(ge.loc[(index1, index2), attribute])
                    except (KeyError, ValueError):
                        val = 0
                    tmp[index2].append(val)

            d["xaxis"] = level1
            for index2 in level2:
                d["series"].append( {"name": index2, "data": tmp[index2]} )

        _CACHE_RKI[(attribute, groupby1, groupby2)] = r = json.dumps(d)
    return r
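
# Example (illustrative): GET /rkibar/SummeFall/Bundesland/Altersgruppe yields one
# bar series per Altersgruppe across the Bundesland categories, while
# GET /rkibar/SummeFall/Bundesland/null (the literal string "null", matching the
# None entry that /lists/groups serializes) yields a single series.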
# -------------------------------------------------------------------------------
#@app.route('/groupby/<attribute>')
#def groupby_geschlecht(attribute):
#    r = _CACHE_GROUPBY.get(attribute)
#    if not r:
#        ge = df.groupby(attribute).sum() [["aktFall","aktTod"]]
#        #ge.index = ge.index.strftime("%d.%m.%Y")
#        _CACHE_GROUPBY[attribute] = r = ge.to_json()
#    return r
# -------------------------------------------------------------------------------
#from gevent.pywsgi import WSGIServer
#if __name__ == '__main__':
#    http_server = WSGIServer(('', 5000), app)
#    http_server.serve_forever()
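
# -------------------------------------------------------------------------------
# Minimal local-development entry point (a sketch; not part of the original
# deployment, which the commented-out gevent runner above suggests was served by
# a separate WSGI server):
if __name__ == '__main__':
    app.run(port=5000)  # Flask's builtin development server; not for production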