Aggregate Water Budget Time Series Data
Access Token. Please get in touch with the WaDE Team at WaDE_WSWC@hotmail.com to get an API access token. The token protects the API from bots and attacks. The WaDE team plans to support a personalized token for each user as part of the WesDAAT user management system. We apologize for the inconvenience.
Retrieves aggregate water budget data for a site by passing in the appropriate options; you can search for available inventory in the system.
Here is the API endpoint and its parameters
Click on the arrow next to the "response" block in the link below to see the API schema and an example response.
Example Python code for using the API
# Needed Libraries
!pip install numpy
!pip install pandas
!pip install openpyxl
import openpyxl
import os
import numpy as np
import pandas as pd
import requests
import json
# Show every column when a DataFrame is rendered in the notebook.
pd.options.display.max_columns = 999
print("Libraries have been imported")
# Record the current working directory; exported files land here by default.
cwd = os.getcwd()
Output = cwd
print(f"current directory = {Output}")
Retrieve data using the WaDE 2.0 AggregatedAmounts API.
Perform a loop starting at StartIndex = 0, then iterate in batches of 1000 rows.
The default return is 1000 rows per API call. It can change with the RecordCount element.
# One empty accumulator DataFrame per dictionary section of the API response.
(Organizations, WaterSources, ReportingUnits, VariableSpecifics,
 Methods, BeneficialUses, AggregatedAmounts) = (pd.DataFrame() for _ in range(7))
print("done")
# Building blocks of the API call, saved as strings for easy construction.
# (uat connection)
# Base endpoint URL for the AggregatedAmounts service.
base_url_API = "https://wade-api.azure-api.net/v1/AggregatedAmounts?"
# Limit the search to the state of interest (two-letter abbreviation).
param1 = "State=UT"
# Security API key.
# NOTE(review): a hard-coded key should not live in source control —
# consider loading it from an environment variable instead.
key = "key=beba8a9dd8724fabb3b16d2a415e9aab"
print("done")
# Page through the API: StartIndex advances by 1000 (the default page size)
# on every iteration, for at most loopRange pages.
StartIndex = 0    # row offset of the current page
loopCounter = 0   # counter for the number of loops we want
loopRange = 100   # how many loops we want to do

# The loop
for loopCounter in range(loopRange):
    StartIndex_param = "StartIndex=" + str(StartIndex)
    print(StartIndex_param)

    # Combine the API parameters into the full request URL.
    callString = base_url_API + param1 + "&" + StartIndex_param + "&" + key
    print(callString)

    # Call the API. A page past the end of the data, a transport error, or a
    # malformed response is reported and skipped rather than aborting the
    # whole download. (Previously a bare `except:` swallowed everything,
    # including KeyboardInterrupt, and the message never said which page
    # failed.)
    try:
        # timeout keeps a dead connection from hanging the notebook forever.
        response_dict = requests.get(callString, timeout=120).json()

        # Organizations data. A single record; the same organization comes
        # back on every page, so overwriting (rather than concatenating) is
        # intentional. The duplicate 'OrganizationContactEmail' entry in the
        # original column list has been removed.
        o_json_data = response_dict['Organizations'][0]
        o_tempDF = pd.json_normalize(o_json_data)
        Organizations = pd.DataFrame(o_tempDF, columns=['OrganizationName',
                                                        'OrganizationPurview',
                                                        'OrganizationWebsite',
                                                        'OrganizationState',
                                                        'OrganizationContactEmail',
                                                        'OrganizationPhoneNumber',
                                                        'OrganizationContactName'], index=[0])
        # WaterSource data
        ws_json_data = response_dict['Organizations'][0]['WaterSources']
        ws_tempDF = pd.json_normalize(ws_json_data)
        WaterSources = pd.concat([WaterSources, ws_tempDF])

        # ReportingUnits data
        ru_json_data = response_dict['Organizations'][0]['ReportingUnits']
        ru_tempDF = pd.json_normalize(ru_json_data)
        ReportingUnits = pd.concat([ReportingUnits, ru_tempDF])

        # VariableSpecifics data
        v_json_data = response_dict['Organizations'][0]['VariableSpecifics']
        v_tempDF = pd.json_normalize(v_json_data)
        VariableSpecifics = pd.concat([VariableSpecifics, v_tempDF])

        # Methods data
        m_json_data = response_dict['Organizations'][0]['Methods']
        m_tempDF = pd.json_normalize(m_json_data)
        Methods = pd.concat([Methods, m_tempDF])

        # BeneficialUses data
        bu_json_data = response_dict['Organizations'][0]['BeneficialUses']
        bu_tempDF = pd.json_normalize(bu_json_data)
        BeneficialUses = pd.concat([BeneficialUses, bu_tempDF])

        # AggregatedAmounts data
        aa_json_data = response_dict['Organizations'][0]['AggregatedAmounts']
        aa_tempDF = pd.json_normalize(aa_json_data)
        aa_tempDF['StartIndex'] = str(StartIndex)  # tracking StartIndex used
        AggregatedAmounts = pd.concat([AggregatedAmounts, aa_tempDF])
    except (requests.RequestException, ValueError, KeyError, IndexError) as err:
        # KeyError/IndexError: the page is past the end of the data.
        # RequestException/ValueError: transport or JSON-decoding failure.
        print("no data returned for " + StartIndex_param + " (" + repr(err) + ")")
    StartIndex = StartIndex + 1000
    print("------------")
# De-duplicate and order the lookup tables.
# Guard against empty frames (e.g. every API call failed): sorting an empty,
# column-less DataFrame by a named column would raise a KeyError.
if not WaterSources.empty:
    WaterSources = WaterSources.drop_duplicates().sort_values(by=['WaterSourceUUID']).reset_index(drop=True)
if not ReportingUnits.empty:
    # Dedup on the UUID only: nested geometry columns are not hashable.
    ReportingUnits = ReportingUnits.drop_duplicates(subset=['ReportingUnitUUID']).sort_values(by=['ReportingUnitUUID']).reset_index(drop=True)
VariableSpecifics = VariableSpecifics.drop_duplicates().reset_index(drop=True)
if not Methods.empty:
    Methods = Methods.drop_duplicates().sort_values(by=['MethodUUID']).reset_index(drop=True)
if not BeneficialUses.empty:
    BeneficialUses = BeneficialUses.drop_duplicates().sort_values(by=['Term']).reset_index(drop=True)
print("done")
Export results
Use a Pandas ExcelWriter to save each dataframe to a separate sheet of a single workbook.
# Write each table to its own sheet of a single Excel workbook.
# Make sure the output folder exists first: ExcelWriter does not create
# missing directories and would raise FileNotFoundError.
os.makedirs('results', exist_ok=True)
with pd.ExcelWriter('results/WaDE_AggregateAPI_Sample.xlsx') as writer:
    Organizations.to_excel(writer, sheet_name='Organizations')
    WaterSources.to_excel(writer, sheet_name='WaterSources')
    ReportingUnits.to_excel(writer, sheet_name='ReportingUnits')
    VariableSpecifics.to_excel(writer, sheet_name='VariableSpecifics')
    Methods.to_excel(writer, sheet_name='Methods')
    BeneficialUses.to_excel(writer, sheet_name='BeneficialUses')
    AggregatedAmounts.to_excel(writer, sheet_name='AggregatedAmounts')
print("done")
Last updated