In [ ]:
import pandas as pd
import numpy as np

from google.colab import drive
drive.mount('/content/drive/')
Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount("/content/drive/", force_remount=True).
In [ ]:
train_df = pd.read_csv('/content/drive/My Drive/spotify_train.csv')
test_df = pd.read_csv('/content/drive/My Drive/spotify_test.csv')

print(train_df.head())
print(test_df.head())
             artist_name                track_id  \
0  Johann Sebastian Bach  2HYMXZklHCHoTtCMbFQ7Xy   
1  Keep Shelly In Athens  4ClzmF0gOejJIaFFgB0Z8m   
2                  Jords  1U1G1YyX0hSFpstdRptJu5   
3          Cold War Kids  62f1o6kMaxj9jeXMpSDRmC   
4  Johann Sebastian Bach  1Uvetd30AXKqnipEzbTpKD   

                                   track_name  acousticness  danceability  \
0      アレグロ・アッサイ(J.S.バッハ:ソナタ 第3番 ハ長調 BWV1005)         0.987         0.466   
1                                     Eternal         0.643         0.553   
2                                    Tek Time         0.173         0.569   
3         Somethings Not Right With Me - Live         0.111         0.412   
4  Da der Herr Christ zu Tische sass, BWV 285         0.993         0.259   

   duration_ms  energy  instrumentalness  key  liveness  loudness  mode  \
0       310040   0.147            0.7480    0    0.0794   -19.590     1   
1       252000   0.602            0.0165    0    0.1360    -7.558     0   
2       259765   0.504            0.0000   10    0.1210    -7.599     0   
3       422600   0.763            0.0267    4    0.6750    -7.450     1   
4        67813   0.142            0.1220    0    0.1560   -19.502     0   

   speechiness    tempo  time_signature  valence  popularity  
0       0.0621  127.239               3    0.965           3  
1       0.0300   90.001               4    0.180          18  
2       0.4320  179.998               4    0.674          18  
3       0.0383  124.857               4    0.187          32  
4       0.0498   58.248               4    0.054          18  
          artist_name                track_id               track_name  \
0       Peter Warlock  772dRqYQxPvk0kujG9lLzj           The Full Heart   
1  Natalia Lafourcade  56jNC1XGBiCBbe130zxkqZ           Ciudad Hermosa   
2    Bipolar Sunshine  7i7cqLZ2CAZxW47WhfIov8                 Pressure   
3       Ben Salisbury  3ULapvPU8DcInhd4o5PhYJ  Approaching the Shimmer   
4      Julia Michaels  7poVnewfuGr6lSgdkqzxSJ          Jump - Acoustic   

   acousticness  danceability  duration_ms  energy  instrumentalness  key  \
0         0.983         0.186       267720  0.1060            0.9840    0   
1         0.671         0.402       101295  0.3070            0.0000    4   
2         0.174         0.800       222079  0.7320            0.0301    2   
3         0.971         0.173       112468  0.0178            0.8720    7   
4         0.773         0.580       194107  0.5210            0.0000    0   

   liveness  loudness  mode  speechiness    tempo  time_signature  valence  \
0    0.1340   -26.287     1       0.0442  130.814               4   0.0283   
1    0.3670   -12.069     1       0.0423  151.475               3   0.6630   
2    0.0745    -6.196     0       0.0544   99.966               4   0.5870   
3    0.1160   -25.866     0       0.0399   64.412               3   0.0336   
4    0.2380    -4.862     1       0.2220  181.815               4   0.7960   

   popularity  
0           1  
1          34  
2          33  
3          23  
4          59  
In [ ]:
train_df.drop('track_id', axis=1, inplace=True)
test_df.drop('track_id', axis=1, inplace=True)
In [ ]:
one_hot = pd.get_dummies(train_df['key'])
train_df = train_df.drop('key',axis = 1)
train_df = train_df.join(one_hot)

one_hot = pd.get_dummies(test_df['key'])
test_df = test_df.drop('key',axis = 1)
test_df = test_df.join(one_hot)

one_hot = pd.get_dummies(train_df['time_signature'])
train_df = train_df.drop('time_signature',axis = 1)
train_df = train_df.join(one_hot, lsuffix='_L', rsuffix='_R')

one_hot = pd.get_dummies(test_df['time_signature'])
test_df = test_df.drop('time_signature',axis = 1)
test_df = test_df.join(one_hot, lsuffix='_L', rsuffix='_R')

train_df.head()
Out[ ]:
artist_name track_name acousticness danceability duration_ms energy instrumentalness liveness loudness mode speechiness tempo valence popularity 0_L 1_L 2 3_L 4_L 5_L 6 7 8 9 10 11 0_R 1_R 3_R 4_R 5_R
0 Johann Sebastian Bach アレグロ・アッサイ(J.S.バッハ:ソナタ 第3番 ハ長調 BWV1005) 0.987 0.466 310040 0.147 0.7480 0.0794 -19.590 1 0.0621 127.239 0.965 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
1 Keep Shelly In Athens Eternal 0.643 0.553 252000 0.602 0.0165 0.1360 -7.558 0 0.0300 90.001 0.180 18 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
2 Jords Tek Time 0.173 0.569 259765 0.504 0.0000 0.1210 -7.599 0 0.4320 179.998 0.674 18 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0
3 Cold War Kids Somethings Not Right With Me - Live 0.111 0.412 422600 0.763 0.0267 0.6750 -7.450 1 0.0383 124.857 0.187 32 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0
4 Johann Sebastian Bach Da der Herr Christ zu Tische sass, BWV 285 0.993 0.259 67813 0.142 0.1220 0.1560 -19.502 0 0.0498 58.248 0.054 18 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
In [ ]:
pd.set_option('display.max_columns', 100)
train_df.head()
Out[ ]:
artist_name track_name acousticness danceability duration_ms energy instrumentalness liveness loudness mode speechiness tempo valence popularity 0_L 1_L 2 3_L 4_L 5_L 6 7 8 9 10 11 0_R 1_R 3_R 4_R 5_R
0 Johann Sebastian Bach アレグロ・アッサイ(J.S.バッハ:ソナタ 第3番 ハ長調 BWV1005) 0.987 0.466 310040 0.147 0.7480 0.0794 -19.590 1 0.0621 127.239 0.965 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
1 Keep Shelly In Athens Eternal 0.643 0.553 252000 0.602 0.0165 0.1360 -7.558 0 0.0300 90.001 0.180 18 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
2 Jords Tek Time 0.173 0.569 259765 0.504 0.0000 0.1210 -7.599 0 0.4320 179.998 0.674 18 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0
3 Cold War Kids Somethings Not Right With Me - Live 0.111 0.412 422600 0.763 0.0267 0.6750 -7.450 1 0.0383 124.857 0.187 32 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0
4 Johann Sebastian Bach Da der Herr Christ zu Tische sass, BWV 285 0.993 0.259 67813 0.142 0.1220 0.1560 -19.502 0 0.0498 58.248 0.054 18 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
In [ ]:
 
In [ ]:
from sklearn.preprocessing import StandardScaler

x_train_scale = train_df.drop(train_df.columns[[0,1,9,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]], axis=1)
x_test_scale = test_df.drop(test_df.columns[[0,1,9,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]], axis=1)

x_train_categorical = train_df.drop(train_df.columns[[0,1,2,3,4,5,6,7,8,10,11,12,13]], axis=1)
x_test_categorical = test_df.drop(test_df.columns[[0,1,2,3,4,5,6,7,8,10,11,12,13]], axis=1)

x_train_text = train_df.iloc[:,0]
x_test_text = test_df.iloc[:,0]
In [ ]:
sc = StandardScaler()
x_train_scale = pd.DataFrame(sc.fit_transform(x_train_scale))  
x_test_scale = pd.DataFrame(sc.transform(x_test_scale))

x_train = pd.concat([x_train_text, x_train_scale, x_train_categorical], axis = 1, sort = False)
x_test = pd.concat([x_test_text, x_test_scale, x_test_categorical], axis = 1, sort = False)

x_train.head()
/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/data.py:645: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
  return self.partial_fit(X, y)
/usr/local/lib/python3.6/dist-packages/sklearn/base.py:464: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
  return self.fit(X, **fit_params).transform(X)
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
  This is separate from the ipykernel package so we can avoid doing imports until
Out[ ]:
artist_name 0 1 2 3 4 5 6 7 8 9 mode 0_L 1_L 2 3_L 4_L 5_L 6 7 8 9 10 11 0_R 1_R 3_R 4_R 5_R
0 Johann Sebastian Bach 1.896333 -0.614581 0.785263 -1.641646 1.424748 -0.686219 -1.480817 -0.403829 0.252540 2.027031 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
1 Keep Shelly In Athens 0.894300 -0.156045 0.318292 0.118395 -0.587917 -0.347800 0.367932 -0.661863 -0.984140 -0.995205 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
2 Jords -0.474756 -0.071716 0.380767 -0.260691 -0.633315 -0.437487 0.361632 2.569594 2.004676 0.906686 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0
3 Cold War Kids -0.655355 -0.899190 1.690884 0.741178 -0.559852 2.874949 0.384526 -0.595144 0.173434 -0.968255 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0
4 Johann Sebastian Bach 1.913810 -1.705582 -1.163616 -1.660987 -0.297642 -0.228218 -1.467296 -0.502702 -2.038663 -1.480302 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
In [ ]:
x_train['artist_name'] = x_train['artist_name'].str.lower()
x_test['artist_name'] = x_test['artist_name'].str.lower()
In [ ]:
from gensim.models import Word2Vec

artist_names = [row.split() for row in x_train['artist_name']]
model_artist = Word2Vec(artist_names)
In [ ]:
model_artist.save('model_artist.bin')
In [ ]:
#from sklearn.decomposition import PCA
#from matplotlib import pyplot

# fit a 2d PCA model to the vectors
#X = model_artist[model_artist.wv.vocab]
#pca = PCA(n_components=2)
#result = pca.fit_transform(X)
# create a scatter plot of the projection
#pyplot.scatter(result[:, 0], result[:, 1])
#words = list(model_artist.wv.vocab)
#for i, word in enumerate(words):
	#pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))

#N = 5
#params = pyplot.gcf()
#plSize = params.get_size_inches()
#params.set_size_inches( (plSize[0]*N, plSize[1]*N) )
#pyplot.show()
In [ ]:
y_train = train_df['popularity']
y_test = test_df['popularity']
In [ ]:
#from sklearn.feature_extraction.text import TfidfVectorizer
#tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', ngram_range=(1, 2))

#features = tfidf.fit_transform(x_train['artist_name']).toarray()
#features.shape
Out[ ]:
(93097, 11136)
In [ ]:
#import pickle

#with open('tfidf.pkl', 'wb') as fout:
  #pickle.dump(tfidf, fout)
In [ ]:
model_artist.wv.most_similar('')
/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.
  if np.issubdtype(vec.dtype, np.int):
Out[ ]:
[('found', 0.9928961396217346),
 ('first', 0.9918167591094971),
 ('heights', 0.991557240486145),
 ('steve', 0.9914349317550659),
 ('b.', 0.9911473989486694),
 ('matthew', 0.9911277294158936),
 ('hayley', 0.9907199144363403),
 ('hubert', 0.990691065788269),
 ('gun', 0.9906023144721985),
 ('asian', 0.9905096888542175)]
In [ ]:
from tqdm import tqdm
import numpy as np

artist_vec = []
artist_avg_vec = []
count = 0
for row in tqdm(range(len(artist_names))):
  [word.split(' ', 1) for word in artist_names[row]]
  for i in range(len(artist_names[row])):
    try:
      artist_vec.append(model_artist[artist_names[row][i]])
      count = count + 1
    except KeyError as e:
      artist_vec.append([0]*100)
  
  average = np.add.reduce(artist_vec)
  if count==0:
    count = 1
  average = np.divide(average, count)
  artist_avg_vec.append(average)
  artist_vec = []
  count = 0
  0%|          | 0/93097 [00:00<?, ?it/s]/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:11: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
  # This is added back by InteractiveShellApp.init_path()
100%|██████████| 93097/93097 [00:01<00:00, 47948.20it/s]
In [ ]:
artist_avg_vec = np.array(artist_avg_vec)
In [ ]:
artist_avg_vec.shape
Out[ ]:
(93097, 100)
In [ ]:
artist_avg_vec = artist_avg_vec.reshape(93097, 100, 1)
In [ ]:
x_train = x_train.drop('artist_name', 1)
In [ ]:
x_train = np.concatenate((x_train, artist_avg_vec), axis=1)
x_train.shape
Out[ ]:
(93097, 128)
In [ ]:
x_train = x_train.reshape(93097, 128, 1)
In [ ]:
import tensorflow as tf

shape = (100,1)

model = tf.keras.models.Sequential()

model.add(tf.keras.layers.Conv1D(64, kernel_size=3, activation=tf.nn.relu, input_shape=shape))
model.add(tf.keras.layers.Conv1D(64, kernel_size=3, activation=tf.nn.relu))

model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
          
model.add(tf.keras.layers.Conv1D(128, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(128, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
          
model.add(tf.keras.layers.Conv1D(256, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(256, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
          
model.add(tf.keras.layers.Conv1D(512, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(512, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=5))

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.BatchNormalization())
          
model.add(tf.keras.layers.Dense(400, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(200, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, kernel_initializer='normal'))


model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/losses_utils.py:170: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
In [ ]:
#features = features.reshape(93097, 11136, 1)
In [ ]:
checkpoint = tf.keras.callbacks.ModelCheckpoint("artist_name.model", monitor='val_loss', verbose=1, save_best_only=True)
model1 = model.fit(artist_avg_vec, y_train, epochs=50, validation_split=0.2, callbacks=[checkpoint])
Train on 74477 samples, validate on 18620 samples
Epoch 1/50
74432/74477 [============================>.] - ETA: 0s - loss: 316.5963 - mean_squared_error: 316.5966
Epoch 00001: val_loss improved from inf to 313.41138, saving model to artist_name.model
74477/74477 [==============================] - 23s 307us/sample - loss: 316.5932 - mean_squared_error: 316.5935 - val_loss: 313.4114 - val_mean_squared_error: 313.4113
Epoch 2/50
74464/74477 [============================>.] - ETA: 0s - loss: 313.3662 - mean_squared_error: 313.3663
Epoch 00002: val_loss improved from 313.41138 to 311.51383, saving model to artist_name.model
74477/74477 [==============================] - 21s 278us/sample - loss: 313.3473 - mean_squared_error: 313.3474 - val_loss: 311.5138 - val_mean_squared_error: 311.5139
Epoch 3/50
74336/74477 [============================>.] - ETA: 0s - loss: 312.2585 - mean_squared_error: 312.2584
Epoch 00003: val_loss did not improve from 311.51383
74477/74477 [==============================] - 21s 283us/sample - loss: 312.1847 - mean_squared_error: 312.1845 - val_loss: 327.5789 - val_mean_squared_error: 327.5789
Epoch 4/50
74368/74477 [============================>.] - ETA: 0s - loss: 311.3069 - mean_squared_error: 311.3065
Epoch 00004: val_loss did not improve from 311.51383
74477/74477 [==============================] - 20s 274us/sample - loss: 311.3013 - mean_squared_error: 311.3009 - val_loss: 662.4164 - val_mean_squared_error: 662.4167
Epoch 5/50
74368/74477 [============================>.] - ETA: 0s - loss: 311.3747 - mean_squared_error: 311.3744
Epoch 00005: val_loss improved from 311.51383 to 310.64705, saving model to artist_name.model
74477/74477 [==============================] - 21s 288us/sample - loss: 311.3102 - mean_squared_error: 311.3099 - val_loss: 310.6470 - val_mean_squared_error: 310.6470
Epoch 6/50
74272/74477 [============================>.] - ETA: 0s - loss: 310.1404 - mean_squared_error: 310.1405
Epoch 00006: val_loss did not improve from 310.64705
74477/74477 [==============================] - 20s 275us/sample - loss: 310.2084 - mean_squared_error: 310.2085 - val_loss: 414.4753 - val_mean_squared_error: 414.4756
Epoch 7/50
74304/74477 [============================>.] - ETA: 0s - loss: 309.3183 - mean_squared_error: 309.3182
Epoch 00007: val_loss did not improve from 310.64705
74477/74477 [==============================] - 20s 274us/sample - loss: 309.3221 - mean_squared_error: 309.3220 - val_loss: 314.6951 - val_mean_squared_error: 314.6952
Epoch 8/50
74464/74477 [============================>.] - ETA: 0s - loss: 310.2330 - mean_squared_error: 310.2331
Epoch 00008: val_loss did not improve from 310.64705
74477/74477 [==============================] - 21s 278us/sample - loss: 310.2029 - mean_squared_error: 310.2030 - val_loss: 313.8138 - val_mean_squared_error: 313.8137
Epoch 9/50
74336/74477 [============================>.] - ETA: 0s - loss: 309.2257 - mean_squared_error: 309.2258
Epoch 00009: val_loss did not improve from 310.64705
74477/74477 [==============================] - 21s 284us/sample - loss: 309.2053 - mean_squared_error: 309.2054 - val_loss: 316.6201 - val_mean_squared_error: 316.6204
Epoch 10/50
74368/74477 [============================>.] - ETA: 0s - loss: 308.8350 - mean_squared_error: 308.8350
Epoch 00010: val_loss improved from 310.64705 to 308.89961, saving model to artist_name.model
74477/74477 [==============================] - 20s 274us/sample - loss: 308.8007 - mean_squared_error: 308.8008 - val_loss: 308.8996 - val_mean_squared_error: 308.8994
Epoch 11/50
74432/74477 [============================>.] - ETA: 0s - loss: 308.3697 - mean_squared_error: 308.3699
Epoch 00011: val_loss did not improve from 308.89961
74477/74477 [==============================] - 20s 273us/sample - loss: 308.4052 - mean_squared_error: 308.4054 - val_loss: 309.8151 - val_mean_squared_error: 309.8152
Epoch 12/50
74432/74477 [============================>.] - ETA: 0s - loss: 307.9430 - mean_squared_error: 307.9431
Epoch 00012: val_loss did not improve from 308.89961
74477/74477 [==============================] - 21s 282us/sample - loss: 307.8887 - mean_squared_error: 307.8888 - val_loss: 311.7082 - val_mean_squared_error: 311.7082
Epoch 13/50
74368/74477 [============================>.] - ETA: 0s - loss: 307.6218 - mean_squared_error: 307.6212
Epoch 00013: val_loss improved from 308.89961 to 308.55408, saving model to artist_name.model
74477/74477 [==============================] - 21s 281us/sample - loss: 307.5718 - mean_squared_error: 307.5711 - val_loss: 308.5541 - val_mean_squared_error: 308.5539
Epoch 14/50
74336/74477 [============================>.] - ETA: 0s - loss: 307.3687 - mean_squared_error: 307.3687
Epoch 00014: val_loss did not improve from 308.55408
74477/74477 [==============================] - 20s 273us/sample - loss: 307.3634 - mean_squared_error: 307.3634 - val_loss: 309.9153 - val_mean_squared_error: 309.9154
Epoch 15/50
74368/74477 [============================>.] - ETA: 0s - loss: 307.1821 - mean_squared_error: 307.1823
Epoch 00015: val_loss improved from 308.55408 to 307.09870, saving model to artist_name.model
74477/74477 [==============================] - 21s 277us/sample - loss: 307.1564 - mean_squared_error: 307.1566 - val_loss: 307.0987 - val_mean_squared_error: 307.0986
Epoch 16/50
74368/74477 [============================>.] - ETA: 0s - loss: 306.7455 - mean_squared_error: 306.7455
Epoch 00016: val_loss did not improve from 307.09870
74477/74477 [==============================] - 22s 302us/sample - loss: 306.7447 - mean_squared_error: 306.7448 - val_loss: 308.0663 - val_mean_squared_error: 308.0662
Epoch 17/50
74400/74477 [============================>.] - ETA: 0s - loss: 306.4861 - mean_squared_error: 306.4859
Epoch 00017: val_loss did not improve from 307.09870
74477/74477 [==============================] - 20s 274us/sample - loss: 306.4688 - mean_squared_error: 306.4686 - val_loss: 308.1023 - val_mean_squared_error: 308.1022
Epoch 18/50
74400/74477 [============================>.] - ETA: 0s - loss: 306.2966 - mean_squared_error: 306.2965
Epoch 00018: val_loss did not improve from 307.09870
74477/74477 [==============================] - 21s 282us/sample - loss: 306.2705 - mean_squared_error: 306.2704 - val_loss: 311.0146 - val_mean_squared_error: 311.0144
Epoch 19/50
74336/74477 [============================>.] - ETA: 0s - loss: 305.7552 - mean_squared_error: 305.7552
Epoch 00019: val_loss improved from 307.09870 to 306.40289, saving model to artist_name.model
74477/74477 [==============================] - 20s 274us/sample - loss: 305.6448 - mean_squared_error: 305.6447 - val_loss: 306.4029 - val_mean_squared_error: 306.4028
Epoch 20/50
74464/74477 [============================>.] - ETA: 0s - loss: 305.2521 - mean_squared_error: 305.2522
Epoch 00020: val_loss did not improve from 306.40289
74477/74477 [==============================] - 21s 288us/sample - loss: 305.2934 - mean_squared_error: 305.2935 - val_loss: 308.2676 - val_mean_squared_error: 308.2675
Epoch 21/50
74432/74477 [============================>.] - ETA: 0s - loss: 304.8608 - mean_squared_error: 304.8607
Epoch 00021: val_loss improved from 306.40289 to 306.13488, saving model to artist_name.model
74477/74477 [==============================] - 20s 274us/sample - loss: 304.8106 - mean_squared_error: 304.8105 - val_loss: 306.1349 - val_mean_squared_error: 306.1348
Epoch 22/50
74368/74477 [============================>.] - ETA: 0s - loss: 304.7158 - mean_squared_error: 304.7157
Epoch 00022: val_loss improved from 306.13488 to 305.88412, saving model to artist_name.model
74477/74477 [==============================] - 20s 274us/sample - loss: 304.8136 - mean_squared_error: 304.8136 - val_loss: 305.8841 - val_mean_squared_error: 305.8843
Epoch 23/50
74368/74477 [============================>.] - ETA: 0s - loss: 304.2400 - mean_squared_error: 304.2405
Epoch 00023: val_loss improved from 305.88412 to 304.64539, saving model to artist_name.model
74477/74477 [==============================] - 20s 275us/sample - loss: 304.2285 - mean_squared_error: 304.2290 - val_loss: 304.6454 - val_mean_squared_error: 304.6453
Epoch 24/50
74464/74477 [============================>.] - ETA: 0s - loss: 304.2631 - mean_squared_error: 304.2630
Epoch 00024: val_loss did not improve from 304.64539
74477/74477 [==============================] - 21s 287us/sample - loss: 304.2575 - mean_squared_error: 304.2574 - val_loss: 304.7504 - val_mean_squared_error: 304.7503
Epoch 25/50
74336/74477 [============================>.] - ETA: 0s - loss: 303.4944 - mean_squared_error: 303.4945
Epoch 00025: val_loss did not improve from 304.64539
74477/74477 [==============================] - 20s 274us/sample - loss: 303.3836 - mean_squared_error: 303.3837 - val_loss: 304.7042 - val_mean_squared_error: 304.7040
Epoch 26/50
74368/74477 [============================>.] - ETA: 0s - loss: 303.2963 - mean_squared_error: 303.2963
Epoch 00026: val_loss did not improve from 304.64539
74477/74477 [==============================] - 20s 273us/sample - loss: 303.2717 - mean_squared_error: 303.2717 - val_loss: 148585.5123 - val_mean_squared_error: 148585.4531
Epoch 27/50
74432/74477 [============================>.] - ETA: 0s - loss: 302.7415 - mean_squared_error: 302.7415
Epoch 00027: val_loss did not improve from 304.64539
74477/74477 [==============================] - 20s 274us/sample - loss: 302.7723 - mean_squared_error: 302.7723 - val_loss: 319.5774 - val_mean_squared_error: 319.5775
Epoch 28/50
74336/74477 [============================>.] - ETA: 0s - loss: 302.1564 - mean_squared_error: 302.1560
Epoch 00028: val_loss did not improve from 304.64539
74477/74477 [==============================] - 21s 287us/sample - loss: 302.2236 - mean_squared_error: 302.2231 - val_loss: 315.5294 - val_mean_squared_error: 315.5295
Epoch 29/50
74304/74477 [============================>.] - ETA: 0s - loss: 301.9588 - mean_squared_error: 301.9593
Epoch 00029: val_loss improved from 304.64539 to 302.17272, saving model to artist_name.model
74477/74477 [==============================] - 20s 275us/sample - loss: 301.9073 - mean_squared_error: 301.9078 - val_loss: 302.1727 - val_mean_squared_error: 302.1727
Epoch 30/50
74432/74477 [============================>.] - ETA: 0s - loss: 301.2965 - mean_squared_error: 301.2965
Epoch 00030: val_loss did not improve from 302.17272
74477/74477 [==============================] - 21s 279us/sample - loss: 301.2394 - mean_squared_error: 301.2394 - val_loss: 302.2039 - val_mean_squared_error: 302.2039
Epoch 31/50
74368/74477 [============================>.] - ETA: 0s - loss: 300.5974 - mean_squared_error: 300.5975
Epoch 00031: val_loss did not improve from 302.17272
74477/74477 [==============================] - 21s 286us/sample - loss: 300.6139 - mean_squared_error: 300.6141 - val_loss: 304.8928 - val_mean_squared_error: 304.8930
Epoch 32/50
74304/74477 [============================>.] - ETA: 0s - loss: 300.1379 - mean_squared_error: 300.1380
Epoch 00032: val_loss did not improve from 302.17272
74477/74477 [==============================] - 21s 287us/sample - loss: 300.1683 - mean_squared_error: 300.1685 - val_loss: 360.8590 - val_mean_squared_error: 360.8588
Epoch 33/50
74368/74477 [============================>.] - ETA: 0s - loss: 299.0411 - mean_squared_error: 299.0408
Epoch 00033: val_loss did not improve from 302.17272
74477/74477 [==============================] - 21s 281us/sample - loss: 299.0607 - mean_squared_error: 299.0604 - val_loss: 303.1087 - val_mean_squared_error: 303.1087
Epoch 34/50
74368/74477 [============================>.] - ETA: 0s - loss: 298.4680 - mean_squared_error: 298.4682
Epoch 00034: val_loss improved from 302.17272 to 299.98674, saving model to artist_name.model
74477/74477 [==============================] - 20s 274us/sample - loss: 298.3884 - mean_squared_error: 298.3886 - val_loss: 299.9867 - val_mean_squared_error: 299.9867
Epoch 35/50
74400/74477 [============================>.] - ETA: 0s - loss: 297.5260 - mean_squared_error: 297.5263
Epoch 00035: val_loss did not improve from 299.98674
74477/74477 [==============================] - 21s 276us/sample - loss: 297.5767 - mean_squared_error: 297.5771 - val_loss: 301.2046 - val_mean_squared_error: 301.2045
Epoch 36/50
74400/74477 [============================>.] - ETA: 0s - loss: 297.7338 - mean_squared_error: 297.7339
Epoch 00036: val_loss did not improve from 299.98674
74477/74477 [==============================] - 21s 285us/sample - loss: 297.6843 - mean_squared_error: 297.6845 - val_loss: 3818.2566 - val_mean_squared_error: 3818.2573
Epoch 37/50
74304/74477 [============================>.] - ETA: 0s - loss: 296.7335 - mean_squared_error: 296.7336
Epoch 00037: val_loss did not improve from 299.98674
74477/74477 [==============================] - 20s 274us/sample - loss: 296.6594 - mean_squared_error: 296.6595 - val_loss: 302.0339 - val_mean_squared_error: 302.0340
Epoch 38/50
74432/74477 [============================>.] - ETA: 0s - loss: 296.3127 - mean_squared_error: 296.3128
Epoch 00038: val_loss did not improve from 299.98674
74477/74477 [==============================] - 20s 274us/sample - loss: 296.2643 - mean_squared_error: 296.2643 - val_loss: 300.3424 - val_mean_squared_error: 300.3423
Epoch 39/50
74304/74477 [============================>.] - ETA: 0s - loss: 295.6651 - mean_squared_error: 295.6654
Epoch 00039: val_loss did not improve from 299.98674
74477/74477 [==============================] - 21s 280us/sample - loss: 295.6273 - mean_squared_error: 295.6276 - val_loss: 307.0725 - val_mean_squared_error: 307.0724
Epoch 40/50
74272/74477 [============================>.] - ETA: 0s - loss: 295.3821 - mean_squared_error: 295.3824
Epoch 00040: val_loss improved from 299.98674 to 297.52177, saving model to artist_name.model
74477/74477 [==============================] - 21s 282us/sample - loss: 295.3457 - mean_squared_error: 295.3460 - val_loss: 297.5218 - val_mean_squared_error: 297.5220
Epoch 41/50
74368/74477 [============================>.] - ETA: 0s - loss: 294.7961 - mean_squared_error: 294.7963
Epoch 00041: val_loss did not improve from 297.52177
74477/74477 [==============================] - 20s 274us/sample - loss: 294.7409 - mean_squared_error: 294.7411 - val_loss: 299.4075 - val_mean_squared_error: 299.4074
Epoch 42/50
74400/74477 [============================>.] - ETA: 0s - loss: 295.2980 - mean_squared_error: 295.2981
Epoch 00042: val_loss improved from 297.52177 to 295.58195, saving model to artist_name.model
74477/74477 [==============================] - 20s 275us/sample - loss: 295.2758 - mean_squared_error: 295.2759 - val_loss: 295.5820 - val_mean_squared_error: 295.5819
Epoch 43/50
74432/74477 [============================>.] - ETA: 0s - loss: 293.9868 - mean_squared_error: 293.9868
Epoch 00043: val_loss did not improve from 295.58195
74477/74477 [==============================] - 21s 283us/sample - loss: 294.0137 - mean_squared_error: 294.0138 - val_loss: 297.8615 - val_mean_squared_error: 297.8616
Epoch 44/50
74464/74477 [============================>.] - ETA: 0s - loss: 293.1330 - mean_squared_error: 293.1331
Epoch 00044: val_loss did not improve from 295.58195
74477/74477 [==============================] - 21s 279us/sample - loss: 293.1092 - mean_squared_error: 293.1092 - val_loss: 396.0918 - val_mean_squared_error: 396.0918
Epoch 45/50
74368/74477 [============================>.] - ETA: 0s - loss: 297.1996 - mean_squared_error: 297.1998
Epoch 00045: val_loss did not improve from 295.58195
74477/74477 [==============================] - 21s 283us/sample - loss: 297.2267 - mean_squared_error: 297.2270 - val_loss: 298.9982 - val_mean_squared_error: 298.9982
Epoch 46/50
74432/74477 [============================>.] - ETA: 0s - loss: 293.4058 - mean_squared_error: 293.4057
Epoch 00046: val_loss improved from 295.58195 to 294.30011, saving model to artist_name.model
74477/74477 [==============================] - 21s 284us/sample - loss: 293.3777 - mean_squared_error: 293.3776 - val_loss: 294.3001 - val_mean_squared_error: 294.3002
Epoch 47/50
74432/74477 [============================>.] - ETA: 0s - loss: 292.3224 - mean_squared_error: 292.3227
Epoch 00047: val_loss did not improve from 294.30011
74477/74477 [==============================] - 22s 289us/sample - loss: 292.3397 - mean_squared_error: 292.3400 - val_loss: 805.6155 - val_mean_squared_error: 805.6154
Epoch 48/50
74272/74477 [============================>.] - ETA: 0s - loss: 291.4408 - mean_squared_error: 291.4408
Epoch 00048: val_loss did not improve from 294.30011
74477/74477 [==============================] - 20s 274us/sample - loss: 291.4581 - mean_squared_error: 291.4581 - val_loss: 295.8103 - val_mean_squared_error: 295.8105
Epoch 49/50
74336/74477 [============================>.] - ETA: 0s - loss: 290.6390 - mean_squared_error: 290.6389
Epoch 00049: val_loss did not improve from 294.30011
74477/74477 [==============================] - 20s 274us/sample - loss: 290.6209 - mean_squared_error: 290.6208 - val_loss: 514.2521 - val_mean_squared_error: 514.2523
Epoch 50/50
74432/74477 [============================>.] - ETA: 0s - loss: 290.3677 - mean_squared_error: 290.3681
Epoch 00050: val_loss did not improve from 294.30011
74477/74477 [==============================] - 20s 274us/sample - loss: 290.3884 - mean_squared_error: 290.3888 - val_loss: 294.4491 - val_mean_squared_error: 294.4490
In [ ]:
model_1 = tf.keras.models.load_model('artist_name.model')
artist_avg_vec = artist_avg_vec.reshape(93097, 100, 1)
prediction = model_1.predict(artist_avg_vec)
In [ ]:
x_train = x_train.drop('artist_name', axis=1)
x_train = np.concatenate((x_train, prediction), axis=1)
x_train.shape
Out[ ]:
(93097, 29)
In [ ]:
x_train = x_train.reshape(93097, 29, 1)
In [ ]:
import tensorflow as tf

shape = (29,1)

model = tf.keras.models.Sequential()

model.add(tf.keras.layers.Conv1D(64, kernel_size=3, activation=tf.nn.relu, input_shape=shape))
model.add(tf.keras.layers.Conv1D(64, kernel_size=3, activation=tf.nn.relu))

model.add(tf.keras.layers.MaxPooling1D(pool_size=1))
          
model.add(tf.keras.layers.Conv1D(128, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(128, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=1))
          
model.add(tf.keras.layers.Conv1D(256, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(256, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=1))
          
model.add(tf.keras.layers.Conv1D(512, kernel_size=3, activation=tf.nn.relu))
model.add(tf.keras.layers.Conv1D(512, kernel_size=3, activation=tf.nn.relu))
          
model.add(tf.keras.layers.MaxPooling1D(pool_size=13))

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.BatchNormalization())
          
model.add(tf.keras.layers.Dense(400, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(200, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, kernel_initializer='normal'))


model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
In [ ]:
checkpoint = tf.keras.callbacks.ModelCheckpoint("spotify.model", monitor='val_loss', verbose=1, save_best_only=True)
model1 = model.fit(x_train, y_train, epochs=100, validation_split=0.2, callbacks=[checkpoint])
Train on 74477 samples, validate on 18620 samples
Epoch 1/100
74336/74477 [============================>.] - ETA: 0s - loss: 294.1875 - mean_squared_error: 294.1873
Epoch 00001: val_loss improved from inf to 283.78458, saving model to spotify.model
74477/74477 [==============================] - 27s 359us/sample - loss: 294.1838 - mean_squared_error: 294.1835 - val_loss: 283.7846 - val_mean_squared_error: 283.7848
Epoch 2/100
74304/74477 [============================>.] - ETA: 0s - loss: 277.7783 - mean_squared_error: 277.7783
Epoch 00002: val_loss did not improve from 283.78458
74477/74477 [==============================] - 22s 294us/sample - loss: 277.7185 - mean_squared_error: 277.7186 - val_loss: 340.4399 - val_mean_squared_error: 340.4397
Epoch 3/100
74304/74477 [============================>.] - ETA: 0s - loss: 272.7238 - mean_squared_error: 272.7239
Epoch 00003: val_loss improved from 283.78458 to 271.67150, saving model to spotify.model
74477/74477 [==============================] - 22s 294us/sample - loss: 272.7057 - mean_squared_error: 272.7058 - val_loss: 271.6715 - val_mean_squared_error: 271.6714
Epoch 4/100
74368/74477 [============================>.] - ETA: 0s - loss: 271.0902 - mean_squared_error: 271.0902
Epoch 00004: val_loss did not improve from 271.67150
74477/74477 [==============================] - 22s 292us/sample - loss: 271.1171 - mean_squared_error: 271.1170 - val_loss: 301.6272 - val_mean_squared_error: 301.6271
Epoch 5/100
74464/74477 [============================>.] - ETA: 0s - loss: 269.8673 - mean_squared_error: 269.8675
Epoch 00005: val_loss did not improve from 271.67150
74477/74477 [==============================] - 23s 307us/sample - loss: 269.8461 - mean_squared_error: 269.8463 - val_loss: 282.2588 - val_mean_squared_error: 282.2587
Epoch 6/100
74464/74477 [============================>.] - ETA: 0s - loss: 268.5515 - mean_squared_error: 268.5518
Epoch 00006: val_loss did not improve from 271.67150
74477/74477 [==============================] - 22s 293us/sample - loss: 268.5341 - mean_squared_error: 268.5344 - val_loss: 271.7782 - val_mean_squared_error: 271.7783
Epoch 7/100
74432/74477 [============================>.] - ETA: 0s - loss: 267.3362 - mean_squared_error: 267.3363
Epoch 00007: val_loss did not improve from 271.67150
74477/74477 [==============================] - 22s 292us/sample - loss: 267.3468 - mean_squared_error: 267.3469 - val_loss: 322.1592 - val_mean_squared_error: 322.1592
Epoch 8/100
74400/74477 [============================>.] - ETA: 0s - loss: 267.2764 - mean_squared_error: 267.2762
Epoch 00008: val_loss did not improve from 271.67150
74477/74477 [==============================] - 24s 319us/sample - loss: 267.2764 - mean_squared_error: 267.2762 - val_loss: 292.5808 - val_mean_squared_error: 292.5807
Epoch 9/100
74304/74477 [============================>.] - ETA: 0s - loss: 266.2121 - mean_squared_error: 266.2126
Epoch 00009: val_loss did not improve from 271.67150
74477/74477 [==============================] - 22s 297us/sample - loss: 266.2728 - mean_squared_error: 266.2732 - val_loss: 279.3941 - val_mean_squared_error: 279.3940
Epoch 10/100
74304/74477 [============================>.] - ETA: 0s - loss: 264.9287 - mean_squared_error: 264.9287
Epoch 00010: val_loss improved from 271.67150 to 267.94996, saving model to spotify.model
74477/74477 [==============================] - 22s 293us/sample - loss: 264.9959 - mean_squared_error: 264.9959 - val_loss: 267.9500 - val_mean_squared_error: 267.9499
Epoch 11/100
74400/74477 [============================>.] - ETA: 0s - loss: 264.7966 - mean_squared_error: 264.7964
Epoch 00011: val_loss did not improve from 267.94996
74477/74477 [==============================] - 22s 293us/sample - loss: 264.8132 - mean_squared_error: 264.8130 - val_loss: 279.6132 - val_mean_squared_error: 279.6133
Epoch 12/100
74432/74477 [============================>.] - ETA: 0s - loss: 264.4250 - mean_squared_error: 264.4250
Epoch 00012: val_loss did not improve from 267.94996
74477/74477 [==============================] - 23s 306us/sample - loss: 264.4160 - mean_squared_error: 264.4160 - val_loss: 268.6360 - val_mean_squared_error: 268.6361
Epoch 13/100
74336/74477 [============================>.] - ETA: 0s - loss: 263.5269 - mean_squared_error: 263.5269
Epoch 00013: val_loss improved from 267.94996 to 267.57421, saving model to spotify.model
74477/74477 [==============================] - 22s 293us/sample - loss: 263.4642 - mean_squared_error: 263.4642 - val_loss: 267.5742 - val_mean_squared_error: 267.5742
Epoch 14/100
74336/74477 [============================>.] - ETA: 0s - loss: 263.2097 - mean_squared_error: 263.2099
Epoch 00014: val_loss did not improve from 267.57421
74477/74477 [==============================] - 22s 292us/sample - loss: 263.2756 - mean_squared_error: 263.2758 - val_loss: 272.4988 - val_mean_squared_error: 272.4988
Epoch 15/100
74304/74477 [============================>.] - ETA: 0s - loss: 262.7829 - mean_squared_error: 262.7826
Epoch 00015: val_loss improved from 267.57421 to 267.45610, saving model to spotify.model
74477/74477 [==============================] - 22s 293us/sample - loss: 262.8282 - mean_squared_error: 262.8278 - val_loss: 267.4561 - val_mean_squared_error: 267.4561
Epoch 16/100
74400/74477 [============================>.] - ETA: 0s - loss: 260.8817 - mean_squared_error: 260.8815
Epoch 00016: val_loss improved from 267.45610 to 264.03856, saving model to spotify.model
74477/74477 [==============================] - 23s 307us/sample - loss: 260.8707 - mean_squared_error: 260.8705 - val_loss: 264.0386 - val_mean_squared_error: 264.0387
Epoch 17/100
74336/74477 [============================>.] - ETA: 0s - loss: 260.6756 - mean_squared_error: 260.6752
Epoch 00017: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 260.6910 - mean_squared_error: 260.6906 - val_loss: 272.9003 - val_mean_squared_error: 272.9005
Epoch 18/100
74368/74477 [============================>.] - ETA: 0s - loss: 259.9684 - mean_squared_error: 259.9683
Epoch 00018: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 259.9258 - mean_squared_error: 259.9257 - val_loss: 277.4895 - val_mean_squared_error: 277.4894
Epoch 19/100
74336/74477 [============================>.] - ETA: 0s - loss: 259.6934 - mean_squared_error: 259.6937
Epoch 00019: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 303us/sample - loss: 259.7104 - mean_squared_error: 259.7107 - val_loss: 266.7587 - val_mean_squared_error: 266.7587
Epoch 20/100
74432/74477 [============================>.] - ETA: 0s - loss: 258.2127 - mean_squared_error: 258.2128
Epoch 00020: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 294us/sample - loss: 258.2014 - mean_squared_error: 258.2016 - val_loss: 272.7773 - val_mean_squared_error: 272.7771
Epoch 21/100
74432/74477 [============================>.] - ETA: 0s - loss: 258.0113 - mean_squared_error: 258.0114
Epoch 00021: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 257.9626 - mean_squared_error: 257.9627 - val_loss: 267.8941 - val_mean_squared_error: 267.8941
Epoch 22/100
74464/74477 [============================>.] - ETA: 0s - loss: 256.9749 - mean_squared_error: 256.9750
Epoch 00022: val_loss did not improve from 264.03856
74477/74477 [==============================] - 24s 317us/sample - loss: 256.9611 - mean_squared_error: 256.9612 - val_loss: 265.2597 - val_mean_squared_error: 265.2597
Epoch 23/100
74464/74477 [============================>.] - ETA: 0s - loss: 255.6454 - mean_squared_error: 255.6454
Epoch 00023: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 255.6708 - mean_squared_error: 255.6709 - val_loss: 264.8572 - val_mean_squared_error: 264.8571
Epoch 24/100
74304/74477 [============================>.] - ETA: 0s - loss: 255.0342 - mean_squared_error: 255.0339
Epoch 00024: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 254.9863 - mean_squared_error: 254.9860 - val_loss: 272.6089 - val_mean_squared_error: 272.6090
Epoch 25/100
74304/74477 [============================>.] - ETA: 0s - loss: 254.2796 - mean_squared_error: 254.2796
Epoch 00025: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 254.3993 - mean_squared_error: 254.3993 - val_loss: 266.7320 - val_mean_squared_error: 266.7320
Epoch 26/100
74400/74477 [============================>.] - ETA: 0s - loss: 253.1856 - mean_squared_error: 253.1858
Epoch 00026: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 296us/sample - loss: 253.2607 - mean_squared_error: 253.2610 - val_loss: 268.8524 - val_mean_squared_error: 268.8524
Epoch 27/100
74432/74477 [============================>.] - ETA: 0s - loss: 252.7553 - mean_squared_error: 252.7551
Epoch 00027: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 302us/sample - loss: 252.8066 - mean_squared_error: 252.8064 - val_loss: 266.1876 - val_mean_squared_error: 266.1877
Epoch 28/100
74336/74477 [============================>.] - ETA: 0s - loss: 251.3935 - mean_squared_error: 251.3935
Epoch 00028: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 251.3648 - mean_squared_error: 251.3649 - val_loss: 749.8218 - val_mean_squared_error: 749.8218
Epoch 29/100
74304/74477 [============================>.] - ETA: 0s - loss: 250.7181 - mean_squared_error: 250.7182
Epoch 00029: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 250.6601 - mean_squared_error: 250.6602 - val_loss: 298.6960 - val_mean_squared_error: 298.6960
Epoch 30/100
74464/74477 [============================>.] - ETA: 0s - loss: 249.0660 - mean_squared_error: 249.0659
Epoch 00030: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 304us/sample - loss: 249.0584 - mean_squared_error: 249.0583 - val_loss: 273.1647 - val_mean_squared_error: 273.1648
Epoch 31/100
74304/74477 [============================>.] - ETA: 0s - loss: 248.0641 - mean_squared_error: 248.0644
Epoch 00031: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 248.1594 - mean_squared_error: 248.1598 - val_loss: 295.6273 - val_mean_squared_error: 295.6271
Epoch 32/100
74432/74477 [============================>.] - ETA: 0s - loss: 246.7691 - mean_squared_error: 246.7690
Epoch 00032: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 246.7535 - mean_squared_error: 246.7534 - val_loss: 270.6225 - val_mean_squared_error: 270.6224
Epoch 33/100
74400/74477 [============================>.] - ETA: 0s - loss: 245.7960 - mean_squared_error: 245.7961
Epoch 00033: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 245.7719 - mean_squared_error: 245.7721 - val_loss: 269.4164 - val_mean_squared_error: 269.4165
Epoch 34/100
74464/74477 [============================>.] - ETA: 0s - loss: 244.5691 - mean_squared_error: 244.5692
Epoch 00034: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 244.5610 - mean_squared_error: 244.5611 - val_loss: 272.1175 - val_mean_squared_error: 272.1175
Epoch 35/100
74368/74477 [============================>.] - ETA: 0s - loss: 242.4132 - mean_squared_error: 242.4128
Epoch 00035: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 242.4044 - mean_squared_error: 242.4040 - val_loss: 271.2292 - val_mean_squared_error: 271.2292
Epoch 36/100
74336/74477 [============================>.] - ETA: 0s - loss: 240.7576 - mean_squared_error: 240.7574
Epoch 00036: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 309us/sample - loss: 240.7171 - mean_squared_error: 240.7170 - val_loss: 292.7334 - val_mean_squared_error: 292.7336
Epoch 37/100
74464/74477 [============================>.] - ETA: 0s - loss: 239.4140 - mean_squared_error: 239.4142
Epoch 00037: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 299us/sample - loss: 239.4052 - mean_squared_error: 239.4053 - val_loss: 281.2551 - val_mean_squared_error: 281.2551
Epoch 38/100
74400/74477 [============================>.] - ETA: 0s - loss: 238.1324 - mean_squared_error: 238.1322
Epoch 00038: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 298us/sample - loss: 238.1675 - mean_squared_error: 238.1673 - val_loss: 272.3371 - val_mean_squared_error: 272.3371
Epoch 39/100
74304/74477 [============================>.] - ETA: 0s - loss: 236.4089 - mean_squared_error: 236.4088
Epoch 00039: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 236.3505 - mean_squared_error: 236.3504 - val_loss: 278.1665 - val_mean_squared_error: 278.1666
Epoch 40/100
74304/74477 [============================>.] - ETA: 0s - loss: 234.3191 - mean_squared_error: 234.3191
Epoch 00040: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 234.2353 - mean_squared_error: 234.2353 - val_loss: 278.2251 - val_mean_squared_error: 278.2251
Epoch 41/100
74336/74477 [============================>.] - ETA: 0s - loss: 231.8928 - mean_squared_error: 231.8926
Epoch 00041: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 231.9581 - mean_squared_error: 231.9578 - val_loss: 275.1084 - val_mean_squared_error: 275.1083
Epoch 42/100
74336/74477 [============================>.] - ETA: 0s - loss: 230.0675 - mean_squared_error: 230.0677
Epoch 00042: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 230.1069 - mean_squared_error: 230.1071 - val_loss: 280.2045 - val_mean_squared_error: 280.2046
Epoch 43/100
74368/74477 [============================>.] - ETA: 0s - loss: 228.0319 - mean_squared_error: 228.0317
Epoch 00043: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 228.0375 - mean_squared_error: 228.0373 - val_loss: 284.2572 - val_mean_squared_error: 284.2573
Epoch 44/100
74336/74477 [============================>.] - ETA: 0s - loss: 225.9381 - mean_squared_error: 225.9382
Epoch 00044: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 225.8883 - mean_squared_error: 225.8883 - val_loss: 280.5425 - val_mean_squared_error: 280.5424
Epoch 45/100
74368/74477 [============================>.] - ETA: 0s - loss: 224.3222 - mean_squared_error: 224.3223
Epoch 00045: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 304us/sample - loss: 224.2882 - mean_squared_error: 224.2884 - val_loss: 284.6918 - val_mean_squared_error: 284.6918
Epoch 46/100
74304/74477 [============================>.] - ETA: 0s - loss: 221.5468 - mean_squared_error: 221.5469
Epoch 00046: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 221.6089 - mean_squared_error: 221.6089 - val_loss: 287.6569 - val_mean_squared_error: 287.6570
Epoch 47/100
74336/74477 [============================>.] - ETA: 0s - loss: 219.2213 - mean_squared_error: 219.2211
Epoch 00047: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 290us/sample - loss: 219.2996 - mean_squared_error: 219.2995 - val_loss: 284.0935 - val_mean_squared_error: 284.0936
Epoch 48/100
74400/74477 [============================>.] - ETA: 0s - loss: 216.3922 - mean_squared_error: 216.3922
Epoch 00048: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 299us/sample - loss: 216.4321 - mean_squared_error: 216.4321 - val_loss: 285.4575 - val_mean_squared_error: 285.4576
Epoch 49/100
74336/74477 [============================>.] - ETA: 0s - loss: 214.3275 - mean_squared_error: 214.3277
Epoch 00049: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 297us/sample - loss: 214.3631 - mean_squared_error: 214.3633 - val_loss: 285.4213 - val_mean_squared_error: 285.4214
Epoch 50/100
74432/74477 [============================>.] - ETA: 0s - loss: 211.8401 - mean_squared_error: 211.8399
Epoch 00050: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 308us/sample - loss: 211.8134 - mean_squared_error: 211.8133 - val_loss: 296.3314 - val_mean_squared_error: 296.3315
Epoch 51/100
74336/74477 [============================>.] - ETA: 0s - loss: 209.2594 - mean_squared_error: 209.2594
Epoch 00051: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 291us/sample - loss: 209.1842 - mean_squared_error: 209.1841 - val_loss: 293.1050 - val_mean_squared_error: 293.1049
Epoch 52/100
74368/74477 [============================>.] - ETA: 0s - loss: 206.2668 - mean_squared_error: 206.2668
Epoch 00052: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 306us/sample - loss: 206.3351 - mean_squared_error: 206.3351 - val_loss: 296.5402 - val_mean_squared_error: 296.5401
Epoch 53/100
74368/74477 [============================>.] - ETA: 0s - loss: 203.7674 - mean_squared_error: 203.7674
Epoch 00053: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 203.7856 - mean_squared_error: 203.7856 - val_loss: 304.6554 - val_mean_squared_error: 304.6554
Epoch 54/100
74464/74477 [============================>.] - ETA: 0s - loss: 200.8496 - mean_squared_error: 200.8497
Epoch 00054: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 200.8412 - mean_squared_error: 200.8414 - val_loss: 317.8630 - val_mean_squared_error: 317.8628
Epoch 55/100
74432/74477 [============================>.] - ETA: 0s - loss: 198.0237 - mean_squared_error: 198.0234
Epoch 00055: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 197.9908 - mean_squared_error: 197.9905 - val_loss: 297.7535 - val_mean_squared_error: 297.7535
Epoch 56/100
74432/74477 [============================>.] - ETA: 0s - loss: 194.6245 - mean_squared_error: 194.6243
Epoch 00056: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 194.5942 - mean_squared_error: 194.5941 - val_loss: 304.2449 - val_mean_squared_error: 304.2449
Epoch 57/100
74432/74477 [============================>.] - ETA: 0s - loss: 192.2245 - mean_squared_error: 192.2247
Epoch 00057: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 192.2319 - mean_squared_error: 192.2320 - val_loss: 309.0905 - val_mean_squared_error: 309.0905
Epoch 58/100
74368/74477 [============================>.] - ETA: 0s - loss: 190.2162 - mean_squared_error: 190.2160
Epoch 00058: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 190.2174 - mean_squared_error: 190.2172 - val_loss: 344.9063 - val_mean_squared_error: 344.9062
Epoch 59/100
74464/74477 [============================>.] - ETA: 0s - loss: 186.3718 - mean_squared_error: 186.3718
Epoch 00059: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 303us/sample - loss: 186.3952 - mean_squared_error: 186.3953 - val_loss: 311.1655 - val_mean_squared_error: 311.1655
Epoch 60/100
74368/74477 [============================>.] - ETA: 0s - loss: 183.0226 - mean_squared_error: 183.0229
Epoch 00060: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 295us/sample - loss: 183.0109 - mean_squared_error: 183.0111 - val_loss: 316.2948 - val_mean_squared_error: 316.2948
Epoch 61/100
74400/74477 [============================>.] - ETA: 0s - loss: 181.1377 - mean_squared_error: 181.1376
Epoch 00061: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 181.1315 - mean_squared_error: 181.1314 - val_loss: 312.8629 - val_mean_squared_error: 312.8628
Epoch 62/100
74464/74477 [============================>.] - ETA: 0s - loss: 178.2981 - mean_squared_error: 178.2979
Epoch 00062: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 178.2855 - mean_squared_error: 178.2853 - val_loss: 328.8874 - val_mean_squared_error: 328.8875
Epoch 63/100
74368/74477 [============================>.] - ETA: 0s - loss: 175.0942 - mean_squared_error: 175.0942
Epoch 00063: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 175.0865 - mean_squared_error: 175.0865 - val_loss: 315.7908 - val_mean_squared_error: 315.7907
Epoch 64/100
74304/74477 [============================>.] - ETA: 0s - loss: 172.2454 - mean_squared_error: 172.2452
Epoch 00064: val_loss did not improve from 264.03856
74477/74477 [==============================] - 24s 317us/sample - loss: 172.2411 - mean_squared_error: 172.2409 - val_loss: 334.5078 - val_mean_squared_error: 334.5076
Epoch 65/100
74304/74477 [============================>.] - ETA: 0s - loss: 169.8728 - mean_squared_error: 169.8730
Epoch 00065: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 169.9658 - mean_squared_error: 169.9660 - val_loss: 40558.5508 - val_mean_squared_error: 40558.5547
Epoch 66/100
74304/74477 [============================>.] - ETA: 0s - loss: 166.8662 - mean_squared_error: 166.8662
Epoch 00066: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 295us/sample - loss: 166.9202 - mean_squared_error: 166.9203 - val_loss: 439.5774 - val_mean_squared_error: 439.5774
Epoch 67/100
74336/74477 [============================>.] - ETA: 0s - loss: 164.3416 - mean_squared_error: 164.3415
Epoch 00067: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 303us/sample - loss: 164.3579 - mean_squared_error: 164.3577 - val_loss: 328.9905 - val_mean_squared_error: 328.9908
Epoch 68/100
74432/74477 [============================>.] - ETA: 0s - loss: 162.1441 - mean_squared_error: 162.1441
Epoch 00068: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 162.1660 - mean_squared_error: 162.1660 - val_loss: 334.3659 - val_mean_squared_error: 334.3661
Epoch 69/100
74304/74477 [============================>.] - ETA: 0s - loss: 157.5201 - mean_squared_error: 157.5201
Epoch 00069: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 157.5348 - mean_squared_error: 157.5349 - val_loss: 351.9700 - val_mean_squared_error: 351.9699
Epoch 70/100
74464/74477 [============================>.] - ETA: 0s - loss: 155.7117 - mean_squared_error: 155.7116
Epoch 00070: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 155.7081 - mean_squared_error: 155.7080 - val_loss: 438.3632 - val_mean_squared_error: 438.3631
Epoch 71/100
74336/74477 [============================>.] - ETA: 0s - loss: 153.1367 - mean_squared_error: 153.1368
Epoch 00071: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 153.1111 - mean_squared_error: 153.1112 - val_loss: 333.7869 - val_mean_squared_error: 333.7869
Epoch 72/100
74336/74477 [============================>.] - ETA: 0s - loss: 149.9257 - mean_squared_error: 149.9256
Epoch 00072: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 150.0525 - mean_squared_error: 150.0523 - val_loss: 327.6574 - val_mean_squared_error: 327.6573
Epoch 73/100
74400/74477 [============================>.] - ETA: 0s - loss: 147.6730 - mean_squared_error: 147.6730
Epoch 00073: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 147.6759 - mean_squared_error: 147.6759 - val_loss: 352.6126 - val_mean_squared_error: 352.6124
Epoch 74/100
74400/74477 [============================>.] - ETA: 0s - loss: 145.0325 - mean_squared_error: 145.0325
Epoch 00074: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 306us/sample - loss: 145.0559 - mean_squared_error: 145.0560 - val_loss: 338.2765 - val_mean_squared_error: 338.2765
Epoch 75/100
74464/74477 [============================>.] - ETA: 0s - loss: 143.1951 - mean_squared_error: 143.1952
Epoch 00075: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 143.1974 - mean_squared_error: 143.1976 - val_loss: 528.3374 - val_mean_squared_error: 528.3375
Epoch 76/100
74336/74477 [============================>.] - ETA: 0s - loss: 139.2482 - mean_squared_error: 139.2482
Epoch 00076: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 139.2926 - mean_squared_error: 139.2926 - val_loss: 350.8385 - val_mean_squared_error: 350.8386
Epoch 77/100
74304/74477 [============================>.] - ETA: 0s - loss: 135.9739 - mean_squared_error: 135.9738
Epoch 00077: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 297us/sample - loss: 135.9768 - mean_squared_error: 135.9768 - val_loss: 353.5822 - val_mean_squared_error: 353.5822
Epoch 78/100
74464/74477 [============================>.] - ETA: 0s - loss: 134.0447 - mean_squared_error: 134.0448
Epoch 00078: val_loss did not improve from 264.03856
74477/74477 [==============================] - 24s 320us/sample - loss: 134.0478 - mean_squared_error: 134.0478 - val_loss: 356.7372 - val_mean_squared_error: 356.7374
Epoch 79/100
74464/74477 [============================>.] - ETA: 0s - loss: 131.9587 - mean_squared_error: 131.9587
Epoch 00079: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 131.9525 - mean_squared_error: 131.9524 - val_loss: 338.0418 - val_mean_squared_error: 338.0418
Epoch 80/100
74336/74477 [============================>.] - ETA: 0s - loss: 128.6879 - mean_squared_error: 128.6880
Epoch 00080: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 128.7260 - mean_squared_error: 128.7261 - val_loss: 362.5181 - val_mean_squared_error: 362.5180
Epoch 81/100
74368/74477 [============================>.] - ETA: 0s - loss: 126.5519 - mean_squared_error: 126.5519
Epoch 00081: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 306us/sample - loss: 126.5601 - mean_squared_error: 126.5601 - val_loss: 363.5475 - val_mean_squared_error: 363.5475
Epoch 82/100
74432/74477 [============================>.] - ETA: 0s - loss: 124.5209 - mean_squared_error: 124.5209
Epoch 00082: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 124.5263 - mean_squared_error: 124.5263 - val_loss: 372.1503 - val_mean_squared_error: 372.1505
Epoch 83/100
74368/74477 [============================>.] - ETA: 0s - loss: 121.6120 - mean_squared_error: 121.6119
Epoch 00083: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 121.5974 - mean_squared_error: 121.5973 - val_loss: 8163.2035 - val_mean_squared_error: 8163.2041
Epoch 84/100
74368/74477 [============================>.] - ETA: 0s - loss: 119.3854 - mean_squared_error: 119.3854
Epoch 00084: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 119.4036 - mean_squared_error: 119.4036 - val_loss: 346.1743 - val_mean_squared_error: 346.1740
Epoch 85/100
74304/74477 [============================>.] - ETA: 0s - loss: 116.9292 - mean_squared_error: 116.9292
Epoch 00085: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 306us/sample - loss: 116.9468 - mean_squared_error: 116.9468 - val_loss: 355.5451 - val_mean_squared_error: 355.5450
Epoch 86/100
74304/74477 [============================>.] - ETA: 0s - loss: 114.3042 - mean_squared_error: 114.3042
Epoch 00086: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 294us/sample - loss: 114.2766 - mean_squared_error: 114.2766 - val_loss: 362.5243 - val_mean_squared_error: 362.5244
Epoch 87/100
74304/74477 [============================>.] - ETA: 0s - loss: 112.8649 - mean_squared_error: 112.8650
Epoch 00087: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 292us/sample - loss: 112.8518 - mean_squared_error: 112.8519 - val_loss: 376.4348 - val_mean_squared_error: 376.4348
Epoch 88/100
74368/74477 [============================>.] - ETA: 0s - loss: 110.1313 - mean_squared_error: 110.1313
Epoch 00088: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 301us/sample - loss: 110.1435 - mean_squared_error: 110.1435 - val_loss: 363.6357 - val_mean_squared_error: 363.6356
Epoch 89/100
74336/74477 [============================>.] - ETA: 0s - loss: 108.0200 - mean_squared_error: 108.0199
Epoch 00089: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 299us/sample - loss: 108.0276 - mean_squared_error: 108.0276 - val_loss: 365.7047 - val_mean_squared_error: 365.7047
Epoch 90/100
74368/74477 [============================>.] - ETA: 0s - loss: 109.3596 - mean_squared_error: 109.3595
Epoch 00090: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 109.3236 - mean_squared_error: 109.3235 - val_loss: 378.1385 - val_mean_squared_error: 378.1385
Epoch 91/100
74336/74477 [============================>.] - ETA: 0s - loss: 102.9651 - mean_squared_error: 102.9651
Epoch 00091: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 302us/sample - loss: 102.9899 - mean_squared_error: 102.9899 - val_loss: 381.9554 - val_mean_squared_error: 381.9552
Epoch 92/100
74464/74477 [============================>.] - ETA: 0s - loss: 101.1517 - mean_squared_error: 101.1516
Epoch 00092: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 312us/sample - loss: 101.1618 - mean_squared_error: 101.1618 - val_loss: 374.8977 - val_mean_squared_error: 374.8977
Epoch 93/100
74400/74477 [============================>.] - ETA: 0s - loss: 100.0674 - mean_squared_error: 100.0674
Epoch 00093: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 100.0389 - mean_squared_error: 100.0389 - val_loss: 444.8390 - val_mean_squared_error: 444.8390
Epoch 94/100
74304/74477 [============================>.] - ETA: 0s - loss: 100.0567 - mean_squared_error: 100.0569
Epoch 00094: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 297us/sample - loss: 100.0590 - mean_squared_error: 100.0592 - val_loss: 371.6653 - val_mean_squared_error: 371.6654
Epoch 95/100
74432/74477 [============================>.] - ETA: 0s - loss: 95.8865 - mean_squared_error: 95.8865
Epoch 00095: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 297us/sample - loss: 95.9215 - mean_squared_error: 95.9216 - val_loss: 407.8994 - val_mean_squared_error: 407.8993
Epoch 96/100
74432/74477 [============================>.] - ETA: 0s - loss: 94.2657 - mean_squared_error: 94.2657
Epoch 00096: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 305us/sample - loss: 94.2671 - mean_squared_error: 94.2671 - val_loss: 373.7891 - val_mean_squared_error: 373.7890
Epoch 97/100
74304/74477 [============================>.] - ETA: 0s - loss: 92.6529 - mean_squared_error: 92.6530
Epoch 00097: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 92.6861 - mean_squared_error: 92.6862 - val_loss: 405.3944 - val_mean_squared_error: 405.3942
Epoch 98/100
74432/74477 [============================>.] - ETA: 0s - loss: 90.1847 - mean_squared_error: 90.1846
Epoch 00098: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 293us/sample - loss: 90.1993 - mean_squared_error: 90.1993 - val_loss: 373.5951 - val_mean_squared_error: 373.5952
Epoch 99/100
74368/74477 [============================>.] - ETA: 0s - loss: 88.9154 - mean_squared_error: 88.9155
Epoch 00099: val_loss did not improve from 264.03856
74477/74477 [==============================] - 23s 307us/sample - loss: 88.9367 - mean_squared_error: 88.9367 - val_loss: 387.0982 - val_mean_squared_error: 387.0983
Epoch 100/100
74464/74477 [============================>.] - ETA: 0s - loss: 87.5135 - mean_squared_error: 87.5135
Epoch 00100: val_loss did not improve from 264.03856
74477/74477 [==============================] - 22s 294us/sample - loss: 87.5302 - mean_squared_error: 87.5303 - val_loss: 396.7547 - val_mean_squared_error: 396.7549
In [ ]:
x_test = x_test.drop('artist_name', 1)
x_test.head()
Out[ ]:
0 1 2 3 4 5 6 7 8 9 mode 0_L 1_L 2 3_L 4_L 5_L 6 7 8 9 10 11 0_R 1_R 3_R 4_R 5_R
0 1.884681 -2.090331 0.444770 -1.800243 2.074084 -0.359758 -2.509829 -0.547717 0.371267 -1.579247 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
1 0.975861 -0.951895 -0.894231 -1.022731 -0.633315 1.033378 -0.325195 -0.562990 1.057422 0.864337 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0
2 -0.471843 1.145777 0.077558 0.621263 -0.550497 -0.715516 0.577207 -0.465725 -0.653201 0.571738 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0
3 1.849726 -2.158848 -0.804336 -2.141420 1.765925 -0.467383 -2.445141 -0.582282 -1.833955 -1.558842 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0
4 1.272975 -0.013740 -0.147496 -0.194931 -0.633315 0.262071 0.782179 0.881520 2.065018 1.376384 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
In [ ]:
artist_names = [row.split() for row in test_df['artist_name']]
In [ ]:
from tqdm import tqdm
import numpy as np

artist_vec = []
artist_avg_vec = []
count = 0
for row in tqdm(range(len(artist_names))):
  [word.split(' ', 1) for word in artist_names[row]]
  for i in range(len(artist_names[row])):
    try:
      artist_vec.append(model_artist[artist_names[row][i]])
      count = count + 1
    except KeyError as e:
      artist_vec.append([0]*100)
  
  average = np.add.reduce(artist_vec)
  if count==0:
    count = 1
  average = np.divide(average, count)
  artist_avg_vec.append(average)
  artist_vec = []
  count = 0
  0%|          | 0/23275 [00:00<?, ?it/s]/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:11: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
  # This is added back by InteractiveShellApp.init_path()
100%|██████████| 23275/23275 [00:00<00:00, 32333.90it/s]
In [ ]:
model_1 = tf.keras.models.load_model('artist_name.model')
artist_avg_vec = np.array(artist_avg_vec)
artist_avg_vec = artist_avg_vec.reshape(23275, 100, 1)
prediction = model_1.predict(artist_avg_vec)

x_test = np.concatenate((x_test, prediction), axis=1)
x_test.shape
Out[ ]:
(23275, 29)
In [ ]:
x_test = x_test.reshape(23275, 29, 1)
In [ ]:
#Ensembled CNN method evaluation
model1 = tf.keras.models.load_model('spotify.model')
score, acc = model1.evaluate(x_test, y_test)
23275/23275 [==============================] - 3s 128us/sample - loss: 285.2079 - mean_squared_error: 285.2079
In [ ]:
x_test = x_test.reshape(23275, 128, 1)
In [ ]:
#All features CNN method evaluation
import tensorflow as tf

model_1 = tf.keras.models.load_model('spotify.model')
score, acc = model_1.evaluate(x_test, y_test)
23275/23275 [==============================] - 2s 99us/sample - loss: 287.2511 - mean_squared_error: 287.2512