# Modeling the Joint Distribution of Wind Speed and Direction using Gaussian Mixture Models¶

OEN Method: Harris & Cook, "The parent wind speed distribution: Why Weibull?" http://www.sciencedirect.com/science/article/pii/S0167610514001056

Gaussian Mixture Models, http://scikit-learn.org/stable/modules/mixture.html

# 1. Set up¶

## 1.1 Environment¶

In [1]:
%matplotlib inline

# NOTE(review): star imports pull the whole plotting/numeric stack (plt, mpl,
# np, pd, arange, ...) into the notebook namespace; explicit imports would be
# clearer, but all later cells rely on these names.
from import_file import *
from helpers.parallel_helper import *

# Global figure style: tight, rounded axis limits and forced patch edges.
plt.rcParams['axes.autolimit_mode'] = 'round_numbers'
plt.rcParams['axes.xmargin'] = 0.
plt.rcParams['axes.ymargin'] = 0.
mpl.rcParams['patch.force_edgecolor'] = True


In [2]:
# file_path, bandwidth= './data/NCDC/europe/uk/marham/dat.txt', 1.7
# file_path, bandwidth= './data/NCDC/europe/uk/tiree/dat.txt', 1.9
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NCDC/europe/uk/boscombe_down/dat.txt', 1.5, 4
# file_path, bandwidth= './data/NCDC/europe/uk/middle_wallop/dat.txt', 1.3
# file_path, bandwidth= './data/NCDC/europe/uk/bournemouth/dat.txt',1.3 # 4?
# file_path= "./data/NCDC/europe/uk/weybourne/dat.txt"
# file_path= "./data/NCDC/europe/uk/skye_lusa/dat.txt" #
# file_path= "./data/NCDC/europe/uk/wattisham/dat.txt"
# file_path= "./data/NCDC/europe/uk/south_uist_range/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/uk/holbeach/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/uk/cambridge/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/us/baltimore/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/bealach_na_ba/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/benbecula/dat.txt" # truncate (untruncate in m/s), 4?
# file_path= './data/NCDC/europe/uk/southhamption/dat.txt' # high 0, trend

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = "./data/NCDC/europe/germany/landsberg_lech/dat.txt", 0.9, 4
# file_path, bandwidth= "./data/NCDC/europe/germany/neuburg/dat.txt", 0.7
# file_path, bandwidth= "./data/NCDC/europe/germany/laupheim/dat.txt", 0.7 # double peak, 4?, trend
# file_path, bandwidth= './data/NCDC/europe/germany/niederstetten/dat.txt', 0.9 # get the peak
# file_path, bandwidth= "./data/NCDC/europe/germany/holzdorf/dat.txt", 0.9 # 2008 year
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/europe/france/nantes/dat.txt', 0.9, 4 # unit shift, one direction deviate big
# file_path, convert_to_knot= './data/NCDC/europe/france/pau_pyrenees/dat.txt', True # unit shift, 2; force using knot
# file_path= "./data/NCDC/europe/france/avord/dat.txt" # try 4, initial speed (should be good with m/s), incomplete dataset
# file_path= "./data/NCDC/europe/france/vatry/dat.txt"  # double peak, initial speed, incomplete dataset
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= "./data/NCDC/europe/spain/valladolid/dat.txt", 1.1, 4
# file_path= './data/NCDC/europe/spain/jerez/dat.txt' # high 0
# file_path, bandwidth= "./data/NCDC/europe/spain/barayas/dat.txt", 0.7 # not good fit
# file_path, bandwidth= './data/NCDC/europe/spain/malaga/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/tenerife_sur/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/almeria/dat.txt', 0.7 # negative dimensions?
# file_path, bandwidth= './data/NCDC/europe/greece/eleftherios_intl/dat.txt',0.7 # some direction might be blocked
# file_path= './data/NCDC/europe/ciampino/dat.txt' # try 4, bandwidth?
# file_path= "./data/NCDC/europe/huspel_aws/dat.txt"  # integer, 4?
# file_path= './data/NCDC/gibraltar/dat.txt' # bad fit

# MidEast
# file_path, bandwidth= './data/NCDC/mideast/uae/al_maktoum/dat.txt', 1.1
# file_path= './data/NCDC/mideast/uae/sharjah_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/dubai_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/abu_dhabi_intl/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/uae/bateen/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/buraimi/dat.txt' # not good dataset
# file_path= './data/NCDC/mideast/turkey/konya/dat.txt'
# file_path= './data/NCDC/mideast/turkey/sivas/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/balikesir/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/bartin/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/iran/chahbahar/dat.txt'
# file_path= './data/NCDC/mideast/iran/zabol/dat.txt' # Problematic data
# file_path= './data/NCDC/mideast/iran/torbat_heydarieh/dat.txt' # Unusable

# file_path, bandwidth = "./data/NCDC/cn/shanghai/hongqiao_intl/dat.txt", 0.6
# file_path, bandwidth= "./data/NCDC/cn/shanghai/pudong/dat.txt", 0.8
# Active dataset for this run: Hefei Luogang (CN), KDE bandwidth 0.6.
file_path, bandwidth= "./data/NCDC/cn/hefei_luogang/dat.txt", 0.6 # few 0, trend, try 2
# file_path, bandwidth= "./data/NCDC/cn/nanjing_lukou/dat.txt", 0.5
# file_path= "./data/NCDC/cn/zhengzhou_xinzheng/dat.txt"
# file_path= "./data/NCDC/cn/tianjin/binhai/dat.txt" # few 0, trend, stationary speed, unstationary direction
# file_path= "./data/NCDC/cn/tianjin/tianjing/dat.txt" # 16 sectors
# file_path= "./data/NCDC/cn/shijiazhuang_zhengding/dat.txt"
# file_path= "./data/NCDC/cn/henan_gushi/dat.txt" # 16 sectors, fit not very good
# file_path= "./data/NCDC/cn/nanning_wuxu/dat.txt" # numpy problem, unstationary speed
# file_path= './data/NCDC/cn/macau/dat.txt'
# file_path= "./data/NCDC/cn/hk_intl/dat.txt" # few 0
# file_path= './data/NCDC/cn/gaoqi/dat.txt'

# file_path= './data/NCDC/southeast_asia/malaysia/mersing/dat.txt' # 2 mode, paper comparison
# file_path= './data/NCDC/southeast_asia/malaysia/penang/dat.txt'
# file_path= './data/NCDC/southeast_asia/malaysia/butterworth/dat.txt' # 2 mode
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_mahmud/dat.txt" # stable
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_ismail/dat.txt" #
# file_path= "./data/NCDC/southeast_asia/singapore/changi/dat.txt" # trend, no 0, questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/paya_lebar/dat.txt" # questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/seletar/dat.txt"
# file_path= "./data/NCDC/east_asia/cheongju_intl/dat.txt" # 2005-2009  may have problem, fit is good; numpy problem
# file_path= "./data/NCDC/east_asia/daegu_ab/dat.txt" # recent 5 year may have problem, but fit is generally good; numpy problem

# file_path, bandwidth= "./data/NCDC/oceania/auckland_intl/dat.txt", 0.9  # Good data, double mode
# file_path= "./data/NCDC/oceania/brisbane_archerfield/dat.txt" # high 0, few data
# file_path= "./data/NCDC/oceania/narrandera/dat.txt" # high 0, few data
# file_path, bandwidth= "./data/NCDC/oceania/canberra/dat.txt", 0.7 # high 0, bad fit
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/oceania/horsham/dat.txt', 0.9, 4 # get the peak

# file_path, bandwidth= './data/NCDC/us/boston_16nm/dat.txt', 0.9 # Offshore, mixed type

# file_path, bandwidth= './data/asos/olympia/hr_avg.csv', 0.5 # might block
# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = './data/asos/bismarck_ND/hr_avg.csv', 1.1, 4
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/aberdeen_SD/hr_avg.csv', 1.7, 2 # only to 2012
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/minneapolis/hr_avg.csv', 1.1, 4
# file_path, bandwidth = './data/asos/lincoln_NE/hr_avg.csv', 0.9
# file_path, bandwidth = './data/asos/des_moines_IA/hr_avg.csv', 1.3
# file_path, bandwidth = './data/asos/springfield_IL/hr_avg.csv', 1.1
# file_path, bandwidth = './data/asos/topeka/hr_avg.csv', 0.7 # High 0
# file_path, bandwidth = './data/asos/denver/hr_avg.csv', 1.3

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NDAWN/baker/hr_avg.csv', 0.7, 4
# file_path, bandwidth = './data/NDAWN/dickinson/hr_avg.csv', 0.6
# file_path = './data/NDAWN/rugby/hr_avg.csv'
# file_path = './data/NDAWN/bowman/hr_avg.csv'
# file_path = './data/NDAWN/grand_forks/hr_avg.csv'
# file_path = './data/NDAWN/williston/hr_avg.csv'
# file_path = './data/NDAWN/jamestown/hr_avg.csv'

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/usa/47N123W/dat.csv', 0.7, 4 #good
# file_path, bandwidth = 'data/ECMWF/venezuela/8N67W/dat.csv', 0.7 # good, but the data might be problematic.
# file_path, bandwidth = 'data/ECMWF/chile/52S75W/dat.csv', 1.9 # good
# file_path, bandwidth= 'data/ECMWF/iceland/65N17W/dat.csv', 1.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = 'data/ECMWF/germany/49N9E/dat.csv', 1.1, 4 # miss peak
# file_path, bandwidth = 'data/ECMWF/sudan/18N32E/dat.csv', 1.1 # good
# file_path, bandwidth = 'data/ECMWF/china/24N121E/dat.csv', 0.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/australia/37S142E/dat.csv', 0.7, 4 # miss the peak, force bandwidth 0.7

In [3]:
# Normalise the raw data into a common schema
# (date, HrMn, type, dir, speed, wind_type), dispatching on the dataset family
# encoded in file_path.
# NOTE(review): the body of the "cn_database" branch and the code that reads
# the file into `df` are missing from this extract, and the cell's indentation
# was lost when the notebook was flattened to text — branch bodies below appear
# unindented. Verify against the original .ipynb before editing.
if "cn_database" in file_path:
elif 'NCDC' in file_path:
# NCDC export: rename columns and keep only the fields used downstream.
df.rename(columns={'Date':'date','Dir':'dir','Spd':'speed','Type':'type','I.1':'wind_type'}, inplace=True)
df = df[['date','HrMn','type','dir','speed','wind_type' ]]
df.dropna(subset=['dir','speed'], inplace=True)
# NCDC speeds/directions are integer-coded (see §1.4 handling below).
integer_data = True
elif 'NDAWN' in file_path:
# NDAWN hourly averages: no report-type metadata, no unit conversion.
df['type']='default'
df['wind_type']='default'
df = df.dropna()
convert_to_knot = False
integer_data = False
elif 'asos' in file_path:
# ASOS
df['type']='default'
df['wind_type']='default'
df = df.dropna()
convert_to_knot = False
integer_data = False
knot_unit = True
else:
# ECMWF reanalysis: (U, V) wind components. Components are negated —
# presumably converting blowing-to into blowing-from convention; TODO confirm.
df.rename(columns={'U':'x','V':'y'}, inplace=True)
df.x=-df.x
df.y=-df.y
df['speed']=np.sqrt(df.x**2+df.y**2)
df['dir']=np.degrees(np.arctan2(df.y, df.x))%360
# 'time' arrives as hours since the 1979-01-01 reanalysis epoch.
df['time']=pd.to_datetime('1979-01-01T00:00:00Z')+pd.to_timedelta(df['time'], unit='h')
df['date']=df['time'].dt.strftime('%Y%m%d')
df['date']=df['date'].astype(int)
df['HrMn']=df['time'].dt.strftime('%H00')
df['type']='default'
df['wind_type']='default'
convert_to_knot = True
integer_data = False
cartesian = True

In [4]:
# Rich display of the assembled frame (bare last expression in the cell).
df

Out[4]:
date HrMn type dir speed wind_type
0 19560820 0000 FM-12 340 2.1 N
1 19560820 0600 FM-12 360 2.1 N
2 19560820 0900 FM-12 360 2.1 N
3 19560820 1800 FM-12 999 0.0 C
4 19560820 2100 FM-12 999 0.0 C
5 19560821 0000 FM-12 140 4.1 N
6 19560821 0300 FM-12 160 2.1 N
7 19560821 0600 FM-12 180 2.1 N
8 19560821 0900 FM-12 160 6.2 N
9 19560821 1200 FM-12 200 5.1 N
10 19560821 1800 FM-12 180 5.1 N
11 19560821 2100 FM-12 140 5.1 N
12 19560822 0000 FM-12 160 7.2 N
13 19560822 0300 FM-12 180 7.2 N
14 19560822 0600 FM-12 200 7.2 N
15 19560822 1200 FM-12 290 7.2 N
16 19560822 1800 FM-12 340 4.1 N
17 19560823 0000 FM-12 340 5.1 N
18 19560823 0300 FM-12 340 4.1 N
19 19560823 0600 FM-12 360 4.1 N
20 19560823 0900 FM-12 320 2.1 N
21 19560823 1800 FM-12 320 2.1 N
22 19560823 2100 FM-12 250 2.1 N
23 19560824 0000 FM-12 320 2.1 N
24 19560824 0300 FM-12 999 0.0 C
25 19560824 0600 FM-12 140 3.1 N
26 19560824 0900 FM-12 180 2.1 N
27 19560824 1200 FM-12 999 0.0 C
28 19560824 1800 FM-12 999 0.0 C
29 19560824 2100 FM-12 110 1.0 N
... ... ... ... ... ... ...
222884 20160131 1800 FM-15 350 3.0 N
222885 20160131 1900 FM-15 340 2.0 N
222886 20160131 2000 FM-15 320 2.0 N
222887 20160131 2100 FM-12 70 3.0 N
222888 20160131 2200 FM-15 350 1.0 N
222889 20160131 2300 FM-15 330 2.0 N
222890 20160201 0000 FM-15 350 1.0 N
222891 20160201 0100 FM-15 999 0.0 C
222892 20160201 0200 FM-15 330 2.0 V
222893 20160201 0300 FM-15 320 3.0 N
222894 20160201 0400 FM-15 330 2.0 V
222895 20160201 0500 FM-15 340 4.0 V
222896 20160201 0600 FM-15 330 3.0 V
222897 20160201 0700 FM-15 330 2.0 V
222898 20160201 0800 FM-15 330 2.0 V
222899 20160201 0900 FM-15 310 2.0 N
222900 20160201 1000 FM-15 340 3.0 N
222901 20160201 1100 FM-15 330 2.0 N
222902 20160201 1200 FM-15 340 2.0 N
222903 20160201 1300 FM-15 350 3.0 N
222904 20160201 1400 FM-15 320 2.0 N
222905 20160201 1500 FM-15 300 2.0 N
222906 20160201 1600 FM-15 330 1.0 N
222907 20160201 1700 FM-15 290 2.0 N
222908 20160201 1800 FM-15 310 2.0 N
222909 20160201 1900 FM-15 280 2.0 N
222910 20160201 2000 FM-15 270 2.0 N
222911 20160201 2100 FM-15 230 2.0 N
222912 20160201 2200 FM-15 250 2.0 N
222913 20160201 2300 FM-15 260 2.0 N

222914 rows × 6 columns

In [5]:
# For NCDC stations, look up the coordinates from the data file and plot the
# station location on a folium map. (Cell indentation lost in text extraction.)
if 'NCDC' in file_path:
lat, long = get_lat_long(file_path)
print(lat,long)
map_osm = folium.Map(location=[lat, long], zoom_start=4)
display(map_osm)

31.78 117.298

In [6]:
# Build a DatetimeIndex from the integer date (YYYYMMDD) and the HHMM string.
# astype(str) already yields strings, so the original extra .map(str) was redundant.
df['time']=pd.to_datetime(df["date"].astype(str) + df["HrMn"], format='%Y%m%d%H%M')
df.set_index(['time'], inplace=True)
# Keep HrMn as an integer column for the sampling-time analysis below.
df['HrMn']=df['HrMn'].astype(int)
# dir == 999 encodes calm/variable wind and is deliberately kept (dir <= 999);
# speed >= 100 is treated as a recording error. Restrict to 1970-2016.
df = df.query("(dir <= 999) & (speed < 100) ")['1970':'2016']

In [7]:
# Overview histograms of speed and direction before any cleaning.
plot_speed_and_angle_distribution(df.speed, df.dir)

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

In [8]:
# Dir [10,360] => [0,350]: fold 360 onto 0; 999 (calm/variable) is preserved.
df['dir'] = df['dir'].apply(lambda x: x%360 if x < 999 else x)
# Convert windrose coordinates to polar coordinates via (90 - x) % 360.
# After this cell, 'dir' is polar and 'dir_windrose' is the windrose form;
# ECMWF data ('cartesian' set) already has polar 'dir', so only the inverse
# transform is applied to derive 'dir_windrose'.
if 'cartesian' in globals():
df['dir_windrose'] = df['dir'].apply(lambda x: (90 - x)%360 if x < 999 else x)
else:
df['dir_windrose'] = df['dir']
df['dir'] = df['dir'].apply(lambda x: (90 - x)%360 if x < 999 else x)
display(df.describe())
df.plot(y='speed',legend=True,figsize=(20,5))

date HrMn dir speed dir_windrose
count 2.010630e+05 201063.000000 201063.000000 201063.000000 201063.000000
mean 2.000563e+07 1088.860506 232.890288 2.646362 221.360320
std 1.186954e+05 703.079666 259.049020 1.672618 255.875427
min 1.973010e+07 0.000000 0.000000 0.000000 0.000000
25% 1.992112e+07 500.000000 60.000000 2.000000 70.000000
50% 2.004071e+07 1000.000000 160.000000 2.000000 140.000000
75% 2.010042e+07 1800.000000 300.000000 4.000000 280.000000
max 2.016020e+07 2300.000000 999.000000 24.000000 999.000000
Out[8]:
<matplotlib.axes._subplots.AxesSubplot at 0xc7b45c0>

## 1.3 General Data Info¶

### 1.3.1 Unit Detection¶

In [9]:
# Unit detection: if many speeds have substantial fractional parts, the data is
# likely metric and is converted to knots (original integer records in knots
# show up as near-integers after any prior conversion).
df['decimal'] = df.speed % 1
df.decimal.hist(alpha=0.5, label='m/s', figsize=(4, 3))

if 'convert_to_knot' not in globals():
    # More than 30% of records with fractional part >= 0.2 => assume m/s.
    # (Direct boolean expression replaces the redundant `True if ... else False`.)
    convert_to_knot = len(df.query('decimal >= 0.2')) / len(df) > 0.3

if convert_to_knot:
    knot_unit = True
    df['speed'] = df['speed'] * 1.943845  # m/s -> knot
    df['decimal'] = df.speed % 1
    df.decimal.hist(alpha=0.5, label='knot')
    # need more elaboration, some is not near an integer
    if integer_data:
        df['speed'] = df['speed'].apply(lambda x: int(round(x)))
    plt_configure(xlabel='Decimal', ylabel='Frequency', legend={'loc': 'best'}, title='Decimal Distribution')
else:
    if 'knot_unit' not in globals():
        knot_unit = False

# Explicit axis keyword (positional axis argument to drop() is deprecated).
df.drop(['decimal'], axis=1, inplace=True)
print(knot_unit)

False

In [10]:
# Axis-label unit suffixes used by all later plots.
dir_unit_text = ' (degree)'
# Conditional expression; truthiness test replaces the `== True` comparison.
speed_unit_text = ' (knot)' if knot_unit else ' (m/s)'


### 1.3.2 Sampling Type Selection¶

In [11]:
# Use only post-2000 records to decide which report type dominates, then keep
# only that type so sampling characteristics are homogeneous.
sample_type = df.query('date > 20000000')['type']
sample_type.value_counts().plot(
kind = 'bar', title = 'Report Types Comprisement', figsize=(4,3))

# idxmax() returns the LABEL of the most frequent type; Series.argmax() is
# deprecated (now positional) in modern pandas and no longer yields the label
# that the query below needs.
report_type_most_used = sample_type.value_counts().idxmax()
df = df.query("type==@report_type_most_used")


### 1.3.3 Sampling Time Selection¶

In [12]:
# Compare sampling-time coverage overall vs. the second half of the record,
# to spot changes in reporting frequency over the years.
MID_YEAR = int(np.average(df.index.year))

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5,label='Overall')
df[str(MID_YEAR):]['HrMn'].value_counts().sort_index().plot(
kind='bar', alpha=0.5, label='>= %s' %  MID_YEAR )

plt_configure(xlabel='Sampling Time', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 4),
title = 'Sampling Time Distribution, Overall and > %s ' %  MID_YEAR)

In [13]:
# Keep only the dominant sampling minute (e.g. HH:00) so records are evenly
# spaced; candidate minutes are those with > 2000 post-2000 records.
df['sample_time'] = df.HrMn % 100
sample_time = df['2000':]['sample_time']
# Compute value_counts once instead of twice.
time_counts = sample_time.value_counts()
sample_times = time_counts[time_counts > 2000].index.tolist()
# df = df.query("sample_time in @sample_times")
df = df.query("sample_time == @sample_times[0]")
# Explicit axis keyword (positional axis argument to drop() is deprecated).
df.drop(['sample_time'], axis=1, inplace=True)
print(sample_times)

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5, figsize=(10, 4))

[0]

Out[13]:
<matplotlib.axes._subplots.AxesSubplot at 0xce2d160>

## 1.4 Error Data handling and Adjustment¶

### 1.4.1 Artefacts¶

wrong direction record

In [14]:
# Integer datasets report direction in whole tens of degrees; anything else
# (other than the 999 calm/variable code) is an artefact and is dropped.
# (Cell indentation lost in text extraction.)
if integer_data:
display(df.query("(dir % 10 >= 0.1) & (dir != 999)"))
df = df.query('(dir % 10 <= 0.1) | (dir == 999)')

date HrMn type dir speed wind_type dir_windrose
time

sudden increase in speed

In [15]:
# sudden increase
# Forward and backward first differences of speed, used below to detect
# one-sample spikes; NaNs at the series ends are treated as no change.
df['incre'] = df.speed.diff(1)
df['incre'].fillna(0, inplace=True)
df['incre_reverse'] = df.speed.diff(-1)
df['incre_reverse'].fillna(0, inplace=True)

df['incre'].plot(kind='hist', bins=arange(-15, 15), legend=True, figsize=(8, 3))

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
2002-03-20 10:00:00 20020320 1000 FM-15 120 14.0 N 330 10.0 4.0
2012-04-02 10:00:00 20120402 1000 FM-15 140 13.0 N 310 10.0 4.0
2005-12-21 04:00:00 20051221 400 FM-15 140 13.0 N 310 2.0 2.0
2010-03-19 23:00:00 20100319 2300 FM-15 120 12.0 N 330 10.0 7.0
2004-03-16 23:00:00 20040316 2300 FM-15 50 12.0 N 40 1.0 7.0
2001-11-13 05:00:00 20011113 500 FM-15 140 12.0 N 310 0.0 1.0
2015-05-10 22:00:00 20150510 2200 FM-15 150 12.0 N 300 2.0 2.0
2002-04-15 17:00:00 20020415 1700 FM-15 140 12.0 N 310 9.0 5.0
2001-11-13 04:00:00 20011113 400 FM-15 140 12.0 N 310 6.0 0.0
2009-06-05 13:00:00 20090605 1300 FM-15 50 12.0 N 40 9.0 7.0
Out[15]:
<matplotlib.axes._subplots.AxesSubplot at 0xd46ff60>
In [16]:
# A spurious spike both rises and falls sharply: only records where BOTH the
# forward and backward jumps exceed the threshold are removed.
incre_threshold = 20 if knot_unit else 10
print('sudden increase number', len(df.query('(incre > @incre_threshold )&(incre_reverse > @incre_threshold )')))
df = df.query('(incre < @incre_threshold )|(incre_reverse < @incre_threshold )')

# Check the max speed
# Explicit axis keyword (positional axis argument to drop() is deprecated).
df.drop(['incre', 'incre_reverse'], axis=1, inplace=True)

sudden increase number 0

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
2002-03-20 10:00:00 20020320 1000 FM-15 120 14.0 N 330 10.0 4.0
2005-12-21 04:00:00 20051221 400 FM-15 140 13.0 N 310 2.0 2.0
2012-04-02 10:00:00 20120402 1000 FM-15 140 13.0 N 310 10.0 4.0
2009-06-05 13:00:00 20090605 1300 FM-15 50 12.0 N 40 9.0 7.0
2010-03-19 23:00:00 20100319 2300 FM-15 120 12.0 N 330 10.0 7.0
2004-03-16 23:00:00 20040316 2300 FM-15 50 12.0 N 40 1.0 7.0
2002-04-15 17:00:00 20020415 1700 FM-15 140 12.0 N 310 9.0 5.0
2015-05-10 22:00:00 20150510 2200 FM-15 150 12.0 N 300 2.0 2.0
2001-11-13 04:00:00 20011113 400 FM-15 140 12.0 N 310 6.0 0.0
2001-11-13 05:00:00 20011113 500 FM-15 140 12.0 N 310 0.0 1.0

### 1.4.2 Direction re-aligment¶

For some datasets, the 16 sectors are not recorded properly,

e.g. the sectors are [0, 20, 50, ...]; the angles need to be redistributed onto 22.5° steps, e.g. [0, 22.5, 45, ...]

In [17]:
# Infer the recorded direction resolution from the number of distinct sectors
# with meaningful counts (> 30 records), excluding the 999 calm/variable code.
display(df['dir'].value_counts().sort_index())
# NOTE(review): the boolean mask uses the UNfiltered value_counts while the
# selection excludes dir == 999 — indices may misalign; verify this is intended.
effective_column = df.query('dir < 999')['dir'].value_counts()[df['dir'].value_counts() > 30].sort_index()
if integer_data:
# e.g. 36 sectors -> 10.0 degrees per sector (float division).
SECTOR_LENGTH = 360/len(effective_column)
else:
SECTOR_LENGTH = 10
print(len(effective_column), SECTOR_LENGTH)

0      4049
10     4013
20     3812
30     4258
40     3997
50     3787
60     3138
70     2750
80     2232
90     2104
100    1627
110    1383
120    1603
130    2040
140    2204
150    2560
160    2479
170    2118
180    1603
190     996
200     772
210     788
220     797
230     915
240     956
250    1226
260    1622
270    1728
280    2201
290    3376
300    4231
310    3698
320    2899
330    2766
340    2958
350    3468
999    6527
Name: dir, dtype: int64
36 10.0

In [18]:
# Redistribute mis-aligned sector angles onto the regular grid inferred above
# (see the §1.4.2 note); helper defined in the imported project modules.
df=realign_direction(df, effective_column)


### 1.4.3 0 Speed¶

In [19]:
# Decide whether calm (zero-speed) records should be dropped, based on the
# post-2005 share of calm observations. (Cell indentation lost in extraction.)
with_too_many_zero, null_wind_frequency = is_with_too_many_zero(df['2005':])
delete_zero = with_too_many_zero
if delete_zero:
df = df.query('(speed > 0)')
print(delete_zero, null_wind_frequency)

False 0.0486580061806

In [20]:
# dir == 999 (calm/variable) records are mostly zero or near-zero speed; assign
# them a direction so they can participate in the joint distribution.
print(df.query('dir == 999')['speed'].value_counts())
df=fill_direction_999(df, SECTOR_LENGTH)

0.0    4826
1.0    1251
2.0     368
3.0      77
4.0       5
Name: speed, dtype: int64


## 1.5 Time Shift Comparison¶

In [21]:
# Direction bin edges for histograms: 'even' centres the 10° bins on the
# reported sector values (0° falls in [-5, 5)); 'round_up' uses [0, 10) edges.
DIR_REDISTRIBUTE = 'even'
if DIR_REDISTRIBUTE == 'even':
DIR_BIN = arange(-5, 360, 10)
elif DIR_REDISTRIBUTE == 'round_up':
DIR_BIN = arange(0, 360+10, 10)

# Comparison between mid_year, looking for:
# 1. Odd Even Bias
# 2. Time Shift of Wind Speed Distribution
bins = arange(0, df.speed.max() + 1)
df[:str(MID_YEAR)]['speed'].plot(
kind='hist', alpha=0.5,bins=bins, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['speed'].plot(
kind='hist', alpha=0.5,bins=bins, label='> %s' % MID_YEAR)

plt.suptitle('Speed Comparison between year < %s, > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Speed', ylabel='Frequency', legend=True, figsize=(8, 3))

In [22]:
# Same first-half / second-half comparison as above, for direction.
df[:str(MID_YEAR)]['dir'].plot(
kind='hist', alpha=0.5,bins=DIR_BIN, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['dir'].plot(
kind='hist', alpha=0.5,bins=DIR_BIN, label='> %s' % MID_YEAR)

plt.suptitle('Dir Comparison between year < %s, and > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Dir', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 3), tight='x')

In [23]:
# Show, then drop, any rows whose direction is still undefined after the fill.
display(df[df['dir'].isnull()])
df.dropna(subset=['dir'], inplace=True)

date HrMn type dir speed wind_type dir_windrose
time
In [24]:
# Inspect the time shift of speed and degree distribution, and odd-even bias
# (helper plots the distributions per multi-year window; see output below).
check_time_shift(df, speed_unit_text=speed_unit_text, dir_unit_text=dir_unit_text)

2001 - 2005

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

2006 - 2010

2011 - 2015

In [25]:
# Annual ('A') and monthly ('M') mean speed, to eyeball long-term trends.
# NOTE(review): the 'A' alias is deprecated in newer pandas ('YE' replaces it).
df.resample('A').mean().plot(y='speed')
plt.gca().set_ylim(bottom=0)
df.resample('M').mean().plot(y='speed', figsize=(20,4))
plt.gca().set_ylim(bottom=0)

Out[25]:
(0, 4.5)
In [26]:
%%time
# Per-year histogram of each column overlaid on the all-years distribution,
# with a shared y-limit so panels are visually comparable.
# NOTE(review): loop nesting indentation was lost in the text extraction;
# `normed=True` is deprecated in favour of `density=True` in modern matplotlib.
for column in ['speed', 'dir']:
if column == 'speed':
bins = arange(0, df[column].max()+1, 1)
else:
bins = arange(0, 361, 10)
# Common y-limit derived from the overall density, with 20% headroom.
den, _ = np.histogram(df[column], bins=bins, density=True)
y_top=max(den)*1.2
for year in arange(1980, 2016):
end_year = year
sub_df = df[str(year):str(end_year)]
# Skip years with too few records to give a meaningful histogram.
if len(sub_df) > 1000:
plt.figure()
df[column].hist(bins=bins, alpha=0.3, normed=True)
sub_df[column].hist(bins=bins, alpha=0.5, figsize=(3,1.5), normed=True)
plt.gca().set_ylim(top=y_top)
plt_configure(title=str(year))
align_figures()

Wall time: 7.82 s

In [27]:
# R² of each 3-year window's histogram density against the overall density,
# per column — low values flag periods whose distribution deviates from the
# long-run one. NOTE(review): loop nesting indentation was lost in extraction.
for column in ['speed', 'dir']:
if column == 'speed':
bins = arange(0, df[column].max()+1, 1)
else:
bins = arange(0, 361, 10)
density_all, _ = np.histogram(df[column], bins=bins, density=True)
df[column].hist(bins=bins, figsize=(5,3))

R_squares = []
years = []
for year in arange(1980, 2016):
# Three-year window centred on `year`.
start_year, end_year = year-1, year+1
sub_df = df[str(start_year):str(end_year)]
if len(sub_df) > 1000:
density, _ = np.histogram(sub_df[column], bins=bins, density=True)
# R² = 1 - SS_res/SS_tot with the overall density as the reference.
y_mean = np.mean(density_all)
SS_tot = np.sum(np.power(density_all - y_mean, 2))
SS_res = np.sum(np.power(density_all - density, 2))

R_square = 1 - SS_res / SS_tot
R_squares.append(R_square)
years.append(year)

plt.figure()
plot(years, R_squares)
# Clamp the lower y-limit into [0, 0.85] so small deviations stay visible.
ylim = max(min(plt.gca().get_ylim()[0],0.85),0)
plt.gca().set_ylim(bottom=ylim, top=1)
plt_configure(figsize=(5,3))
align_figures()


## 1.6 Re-distribute Direction and Speed (Optional)¶

e.g. Dir 50 -> 45 ~ 55, to make the KDE result better

In [28]:
# Spread integer-coded directions evenly across their sector so the KDE does
# not spike at the reported values.
if integer_data:
df = randomize_angle(df, DIR_REDISTRIBUTE, SECTOR_LENGTH)

In [29]:
# Randomise integer-coded speeds within their recording interval: 'down' when
# calm records were deleted, otherwise 'up' (output below shows
# "Redistribute upward, e.g. 0 -> [0,1]"). Indentation lost in extraction.
if integer_data:
if delete_zero:
redistribute_method = 'down'
else:
redistribute_method = 'up'

df, speed_redistribution_info = randomize_speed(df, redistribute_method)

Redistribute upward, e.g. 0 -> [0,1]


## 1.7 Generate (x,y) from (speed,dir)¶

In [30]:
# Cook orientation: optional 180° rotation of all directions; left disabled.
# df['dir']= (df['dir'] + 180)%360

In [31]:
# There might be a small dot in the centre, which is due to too many zero (more than 1 speed) in center
# Scatter plot in matplot has performance issue, the speed is very slow
# Convert polar (speed, dir in degrees) to Cartesian (x, y) for the 2-D GMM.
df['x'] = df['speed'] * cos(df['dir'] * pi / 180.0)
df['y'] = df['speed'] * sin(df['dir'] * pi / 180.0)


# 2. Re-select Data and Overview¶

## 2.1 Data Overview¶

In [32]:
## Summary of the data selection
print('Knot unit?', knot_unit)
print('Report type used:', report_type_most_used)
print('Sampling time used:', sample_times)
if 'speed_redistribution_info' in globals():
print('Speed redistribution info:', speed_redistribution_info )

# Keep the full cleaned record for cross-year comparison; model on 2011-2015.
df_all_years = df # for later across-year comparison
df = df_all_years['2011':'2015']
# df = df.query('(HrMn == 0) and (speed >= 0.5) and (date%10000 > 900) and (date%10000 < 1000)' )
df.describe()

Knot unit? False
Report type used: FM-15
Sampling time used: [0]
Speed redistribution info: Redistribute upward, e.g. 0 -> [0,1]

Out[32]:
date HrMn dir speed dir_windrose x y
count 3.555300e+04 35553.000000 35553.000000 35553.000000 35553.000000 35553.000000 35553.000000
mean 2.013290e+07 1163.080471 164.685973 3.384006 215.826400 0.898205 0.184083
std 1.384736e+04 682.019610 119.491613 1.599733 252.099519 2.612942 2.518494
min 2.011010e+07 0.000000 -4.995724 0.001138 0.000000 -10.835943 -9.859464
25% 2.012072e+07 600.000000 47.863324 2.278931 70.000000 -0.803215 -1.526956
50% 2.013111e+07 1200.000000 148.654352 3.219337 130.000000 1.326992 0.292626
75% 2.014121e+07 1700.000000 289.450501 4.317136 270.000000 2.691972 1.900851
max 2.015123e+07 2300.000000 355.000000 13.115997 999.000000 11.685250 10.331285
In [33]:
# Speed time series over the selected modelling window (2011-2015).
df.plot(y='speed',legend=True,figsize=(20,5))

Out[33]:
<matplotlib.axes._subplots.AxesSubplot at 0xdc41208>
In [34]:
# Accumulation by month
# Monthly record counts — gaps reveal missing data in the selected window.
df.resample('M').count().plot(y='date', kind='bar',figsize=(20,4))

Out[34]:
<matplotlib.axes._subplots.AxesSubplot at 0x185ce780>
In [35]:
# 90 degree is east
# Wind rose of the selected data in windrose coordinates, 36 sectors,
# viridis-coloured by speed. NOTE(review): `normed` is the windrose package's
# own keyword here, distinct from matplotlib's deprecated hist argument.
ax = WindroseAxes.from_ax()
viridis = plt.get_cmap('viridis')
ax.bar(df.dir_windrose, df.speed, normed=True, opening=0.8, edgecolor='white', nsector=36, cmap=viridis)
ax.set_legend()

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\cbook.py:136: MatplotlibDeprecationWarning: The axisbg attribute was deprecated in version 2.0. Use facecolor instead.
warnings.warn(message, mplDeprecation, stacklevel=1)