path stringlengths 13 17 | screenshot_names listlengths 1 873 | code stringlengths 0 40.4k | cell_type stringclasses 1 value |
|---|---|---|---|
89125628/cell_51 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
plt = df.plot.bar() | code |
89125628/cell_59 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
plt = df.plot.bar()
df = pd.DataFrame([[180000, 110, 18.9, 1400], [360000, 905, 23.4, 1800], [230000, 230, 14.0, 1300], [60000, 450, 13.5, 1500]], columns=['Col A', 'Col B', 'Col C', 'Col D'])
import matplotlib.pyplot as plt
plt = df.plot.bar()
df_max_scaled = df.copy()
for column in df_max_scaled.columns:
df_max_scaled[column] = df_max_scaled[column] / df_max_scaled[column].abs().max()
import matplotlib.pyplot as plt
plt = df_max_scaled.plot.bar() | code |
89125628/cell_58 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
plt = df.plot.bar()
df = pd.DataFrame([[180000, 110, 18.9, 1400], [360000, 905, 23.4, 1800], [230000, 230, 14.0, 1300], [60000, 450, 13.5, 1500]], columns=['Col A', 'Col B', 'Col C', 'Col D'])
import matplotlib.pyplot as plt
plt = df.plot.bar()
df_max_scaled = df.copy()
for column in df_max_scaled.columns:
df_max_scaled[column] = df_max_scaled[column] / df_max_scaled[column].abs().max()
display(df_max_scaled) | code |
129007116/cell_42 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797
def linearRegression(x, w, b):
pre_y = []
m = len(x)
for i in range(m):
f_wb = w * x[i] + b
pre_y.append(f_wb)
return pre_y
y_pred = linearRegression(x, w, b)
def SquaredErrorCost(y_pred, y):
totalCost = 0
m = len(y_pred)
for i in range(m):
cost = (y_pred[i] - y[i]) ** 2
totalCost += cost
totalCost /= 2 * m
return totalCost
def compute_cost(x, y, w, b):
m = x.shape[0]
cost = 0
for i in range(m):
f_wb = w * x[i] + b
cost = cost + (f_wb - y[i]) ** 2
total_cost = 1 / (2 * m) * cost
return total_cost
cost = SquaredErrorCost(y_pred, y)
print(' Squared Error Cost :', cost)
print(f' Squared Error Cost : {cost:10}')
print('Squared Error Cost: {:.5e}'.format(cost)) | code |
129007116/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
print('Duplicate Values =', data.duplicated().sum()) | code |
129007116/cell_30 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | w = 9909
b = 21641.1797
print('w :', w)
print('b :', b) | code |
129007116/cell_29 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
plt.title('Salary Data')
plt.scatter(x, y, marker='x', c='r')
plt.xlabel('Years of Experience')
plt.ylabel('Salary (per year)')
plt.show() | code |
129007116/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
print('x_train data is')
x | code |
129007116/cell_48 | [
"text_plain_output_1.png"
] | import math
import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797
def linearRegression(x, w, b):
pre_y = []
m = len(x)
for i in range(m):
f_wb = w * x[i] + b
pre_y.append(f_wb)
return pre_y
y_pred = linearRegression(x, w, b)
def SquaredErrorCost(y_pred, y):
totalCost = 0
m = len(y_pred)
for i in range(m):
cost = (y_pred[i] - y[i]) ** 2
totalCost += cost
totalCost /= 2 * m
return totalCost
def compute_cost(x, y, w, b):
m = x.shape[0]
cost = 0
for i in range(m):
f_wb = w * x[i] + b
cost = cost + (f_wb - y[i]) ** 2
total_cost = 1 / (2 * m) * cost
return total_cost
def compute_gradient(x, y, w, b):
m = len(x)
dj_dw = w
dj_db = b
for i in range(m):
f_wb = w * x[i] + b
dj_dw_i = (f_wb - y[i]) * x[i]
dj_db_i = f_wb - y[i]
dj_dw += dj_dw_i
dj_db += dj_db_i
dj_dw /= m
dj_db /= m
return (dj_dw, dj_db)
def gradient_descent(x, y, w_in, b_in, alpha, num_iters, cost_function, gradient_function):
"""
Performs gradient descent to fit w,b. Updates w,b by taking
num_iters gradient steps with learning rate alpha
Args:
x (ndarray (m,)) : Data, m examples
y (ndarray (m,)) : target values
w_in,b_in (scalar): initial values of model parameters
alpha (float): Learning rate
num_iters (int): number of iterations to run gradient descent
cost_function: function to call to produce cost
gradient_function: function to call to produce gradient
Returns:
w (scalar): Updated value of parameter after running gradient descent
b (scalar): Updated value of parameter after running gradient descent
J_history (List): History of cost values
p_history (list): History of parameters [w,b]
"""
J_history = []
p_history = []
b = b_in
w = w_in
for i in range(num_iters):
dj_dw, dj_db = gradient_function(x, y, w, b)
b = b - alpha * dj_db
w = w - alpha * dj_dw
if i < 100000:
J_history.append(cost_function(x, y, w, b))
p_history.append([w, b])
return (w, b, J_history, p_history)
w_init = 0
b_init = 0
iterations = 10000
tmp_alpha = 0.01
w_final, b_final, J_hist, p_hist = gradient_descent(x, y, w_init, b_init, tmp_alpha, iterations, compute_cost, compute_gradient)
print(f'(w,b) found by gradient descent: ({w_final:8.4f},{b_final:8.4f})') | code |
129007116/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797
def linearRegression(x, w, b):
pre_y = []
m = len(x)
for i in range(m):
f_wb = w * x[i] + b
pre_y.append(f_wb)
return pre_y
y_pred = linearRegression(x, w, b)
data = {'y': y, 'y_pred': y_pred, 'error': abs(y_pred - y)}
df = pd.DataFrame(data)
print(df) | code |
129007116/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.head() | code |
129007116/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'}) | code |
129007116/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.tail() | code |
129007116/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum() | code |
129007116/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
print('data shape :', data.shape) | code |
129007116/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
print(data.info()) | code |
129007116/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
print('y_train data is')
y | code |
129007116/cell_36 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv')
data.isnull().sum()
data.isnull().sum().to_frame().rename(columns={0: 'Total No. of Missing Values'})
x = data.YearsExperience
y = data.Salary
w = 9909
b = 21641.1797
def linearRegression(x, w, b):
pre_y = []
m = len(x)
for i in range(m):
f_wb = w * x[i] + b
pre_y.append(f_wb)
return pre_y
y_pred = linearRegression(x, w, b)
plt.plot(x, y_pred, c='b', label='Our Prediction')
plt.scatter(x, y, marker='x', c='r', label='Actual Values')
plt.title('Housing Prices')
plt.ylabel('Price (in 1000s of dollars)')
plt.xlabel('Size (1000 sqft)')
plt.legend()
plt.show() | code |
2041173/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import warnings
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MaxAbsScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2041173/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.preprocessing import MaxAbsScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
df = df.fillna('unknown')
nrow_train = train.shape[0]
vectorizer = TfidfVectorizer(stop_words='english', max_features=50000)
data = vectorizer.fit_transform(df)
X = MaxAbsScaler().fit_transform(data)
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
loss = []
for i, j in enumerate(col):
print('===Fit ' + j)
model = LogisticRegression()
model.fit(X[:nrow_train], train[j])
preds[:, i] = model.predict_proba(X[nrow_train:])[:, 1]
pred_train = model.predict_proba(X[:nrow_train])[:, 1]
print('log loss:', log_loss(train[j], pred_train))
loss.append(log_loss(train[j], pred_train))
print('mean column-wise log loss:', np.mean(loss))
submid = pd.DataFrame({'id': subm['id']})
submission = pd.concat([submid, pd.DataFrame(preds, columns=col)], axis=1) | code |
2041173/cell_8 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.preprocessing import MaxAbsScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
df = df.fillna('unknown')
nrow_train = train.shape[0]
vectorizer = TfidfVectorizer(stop_words='english', max_features=50000)
data = vectorizer.fit_transform(df)
X = MaxAbsScaler().fit_transform(data)
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
loss = []
for i, j in enumerate(col):
model = LogisticRegression()
model.fit(X[:nrow_train], train[j])
preds[:, i] = model.predict_proba(X[nrow_train:])[:, 1]
pred_train = model.predict_proba(X[:nrow_train])[:, 1]
loss.append(log_loss(train[j], pred_train))
submid = pd.DataFrame({'id': subm['id']})
submission = pd.concat([submid, pd.DataFrame(preds, columns=col)], axis=1)
submission.head() | code |
2041173/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test[0:10] | code |
105203676/cell_21 | [
"text_html_output_1.png"
] | from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
X = pad_sequences(sequences, maxlen=max_len) | code |
105203676/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df.describe(include='object') | code |
105203676/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2] | code |
105203676/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
plt.rcParams['figure.figsize'] = (10, 4)
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show() | code |
105203676/cell_20 | [
"text_html_output_1.png"
] | from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
print(concated['LABEL'][:10])
labels = to_categorical(concated['LABEL'], num_classes=5)
print(labels[:10])
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1) | code |
105203676/cell_29 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)]) | code |
105203676/cell_26 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax')) | code |
105203676/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
plt.hist(x=df['length']) | code |
105203676/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105203676/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df.info() | code |
105203676/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df['Category'].value_counts() | code |
105203676/cell_28 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
print(model.summary()) | code |
105203676/cell_8 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
print(df['Category'].value_counts())
sns.countplot(data=df, x='Category', palette='RdBu')
plt.title('The Distribution of Category') | code |
105203676/cell_15 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df[['length', 'polarity', 'subjectivity']] | code |
105203676/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
plt.rcParams['figure.figsize'] = (10, 4)
plt.subplot(1, 2, 1)
sns.distplot(df['polarity'])
plt.subplot(1, 2, 2)
sns.distplot(df['subjectivity'])
plt.suptitle('Distribution of Polarity and Subjectivity')
plt.show() | code |
105203676/cell_35 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])
accr = model.evaluate(X_test, y_test)
txt = ['Rafael Nadal extended his Grand Slam winning streak to 19 matches with a 3-6, 7-5, 3-6, 7-5, 7-6 (10-4) victory over Taylor Fritz']
seq = tokenizer.texts_to_sequences(txt)
padded = pad_sequences(seq, maxlen=max_len)
pred = model.predict(padded)
labels = ['sport', 'business', 'politics', 'entertainment', 'tech']
print(pred, labels[np.argmax(pred)]) | code |
105203676/cell_31 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
num_of_categories = 45000
shuffled = df.reindex(np.random.permutation(df.index))
sport = shuffled[shuffled['Category'] == 'sport'][:num_of_categories]
business = shuffled[shuffled['Category'] == 'business'][:num_of_categories]
politics = shuffled[shuffled['Category'] == 'politics'][:num_of_categories]
entertainment = shuffled[shuffled['Category'] == 'entertainment'][:num_of_categories]
tech = shuffled[shuffled['Category'] == 'tech'][:num_of_categories]
concated = pd.concat([sport, business, politics, entertainment, tech], ignore_index=True)
concated = concated.reindex(np.random.permutation(concated.index))
concated['LABEL'] = 0
concated.loc[concated['Category'] == 'sport', 'LABEL'] = 0
concated.loc[concated['Category'] == 'business', 'LABEL'] = 1
concated.loc[concated['Category'] == 'politics', 'LABEL'] = 2
concated.loc[concated['Category'] == 'entertainment', 'LABEL'] = 3
concated.loc[concated['Category'] == 'tech', 'LABEL'] = 4
labels = to_categorical(concated['LABEL'], num_classes=5)
if 'Category' in concated.keys():
concated.drop(['Category'], axis=1)
n_most_common_words = 8000
max_len = 130
tokenizer = Tokenizer(num_words=n_most_common_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(concated['Text'].values)
sequences = tokenizer.texts_to_sequences(concated['Text'].values)
word_index = tokenizer.word_index
X = pad_sequences(sequences, maxlen=max_len)
epochs = 50
emb_dim = 128
batch_size = 64
labels[:2]
model = Sequential()
model.add(Embedding(n_most_common_words, emb_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.7))
model.add(LSTM(64, dropout=0.7, recurrent_dropout=0.7))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=7, min_delta=0.01)])
accr = model.evaluate(X_test, y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0], accr[1])) | code |
105203676/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
df[['length', 'polarity', 'Text']] | code |
105203676/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-news/BBC News Train.csv')
print('Shpe of Data', df.shape)
df.head(10) | code |
32074163/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.info() | code |
32074163/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
df_tmp.UsageBand
for label, content in df_tmp.items():
if pd.api.types.is_string_dtype(content):
print(label) | code |
32074163/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.head() | code |
32074163/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
df_tmp.info() | code |
32074163/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum() | code |
32074163/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.SalePrice.plot.hist() | code |
32074163/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv') | code |
32074163/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.info() | code |
32074163/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32074163/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts() | code |
32074163/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df.info() | code |
32074163/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
df_tmp.UsageBand | code |
32074163/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df.saledate.head(20) | code |
32074163/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand']) | code |
32074163/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.head() | code |
32074163/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df_tmp = df.copy()
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayofweek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayofyear'] = df_tmp.saledate.dt.dayofyear
df_tmp.drop('saledate', axis=1, inplace=True)
df_tmp.state.value_counts()
df_tmp.isnull().sum()
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
df_tmp.UsageBand
for label, content in df_tmp.items():
if pd.api.types.is_string_dtype(content):
df_tmp[label] = content.astype('category').cat.as_ordered()
df_tmp.info() | code |
32074163/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv', low_memory=False, parse_dates=['saledate'])
df.saledate.head(20) | code |
32074163/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/bluebook-for-bulldozers/TrainAndValid.csv')
fig, ax = plt.subplots()
ax.scatter(df['saledate'][:1000], df['SalePrice'][:1000]) | code |
1005908/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
    """Return every feature string matching the regex `key`, case-insensitively.

    Searches the module-level ``uniq_feature_total`` list and returns the
    matching entries in their original order.
    """
    # Compile once instead of re-compiling the pattern on every iteration,
    # and test with `is not None` rather than `!= None`.
    pattern = re.compile(key, re.IGNORECASE)
    return [item for item in uniq_feature_total if pattern.search(item) is not None]
re_search('no\\s*\\w*\\s*fee') | code |
1005908/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('doorman') | code |
1005908/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
def add_feature(row):
    """Add 0/1 indicator columns for common listing amenities.

    Scans ``row['features_new']`` (a space-joined, lower-cased feature
    string) with case-insensitive regexes and sets one flag column per
    amenity: hardwood, doorman, no_fee, reduce_fee, laundry, war, gym.
    Returns the mutated row so it can be used with ``DataFrame.apply``.
    """
    # Column name -> regex pattern (patterns identical to the originals).
    amenity_patterns = {
        'hardwood': r'hardwood',
        'doorman': r'doorman',
        'no_fee': r'no\w*fee',
        'reduce_fee': r'reduce|low\wfee',
        'laundry': r'laundry',
        'war': r'war\Z|war\s|war_',
        'gym': r'fitness|gym',
    }
    text = row['features_new']
    for column, pattern in amenity_patterns.items():
        row[column] = 1 if re.search(pattern, text, re.IGNORECASE) else 0
    return row
train = train.apply(add_feature, axis=1)
train[['hardwood', 'doorman', 'no_fee', 'reduce_fee', 'laundry', 'war', 'gym']].apply(sum) | code |
1005908/cell_6 | [
"text_plain_output_1.png"
] | import itertools as it
import pandas as pd
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total) | code |
1005908/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
    """Return every feature string matching the regex `key`, case-insensitively.

    Searches the module-level ``uniq_feature_total`` list and returns the
    matching entries in their original order.
    """
    # Compile once instead of re-compiling the pattern on every iteration,
    # and test with `is not None` rather than `!= None`.
    pattern = re.compile(key, re.IGNORECASE)
    return [item for item in uniq_feature_total if pattern.search(item) is not None]
re_search('fitness|gym') | code |
1005908/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('reduce|low\\sfee') | code |
1005908/cell_7 | [
"text_plain_output_1.png"
] | import itertools as it
import pandas as pd
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
list(uniq_feature_total)[:10] | code |
1005908/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('war\\Z|war\\s') | code |
1005908/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
target_num_map = {'high': 0, 'medium': 1, 'low': 2}
features = tr_sparse.toarray()
labels = train['interest_level'].apply(lambda x: target_num_map[x]).as_matrix()
clf = DecisionTreeClassifier(max_depth=5)
cv = StratifiedShuffleSplit(n_splits=3, test_size=0.3)
for train_idx, test_idx in cv.split(features, labels):
features_train, labels_train = (features[train_idx], labels[train_idx])
features_test, labels_test = (features[test_idx], labels[test_idx])
clf.fit(features_train, labels_train)
print('log loss:', -1 * round(log_loss(labels_test, clf.predict_proba(features_test)), 3))
print('high importance features:')
for idx in np.where(clf.feature_importances_ > 0.05)[0]:
print(' ', feature_names[idx], round(clf.feature_importances_[idx], 3)) | code |
1005908/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('fee') | code |
1005908/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('hardwood') | code |
1005908/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import itertools as it
import pandas as pd
import re
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total)
uniq_feature_total = set(feature_total)
len(uniq_feature_total)
vec = CountVectorizer(stop_words='english', max_features=200)
train['features_new'] = train['features'].apply(lambda y: ' '.join(['_'.join(x.split(' ')).lower() for x in y]))
tr_sparse = vec.fit_transform(train['features_new'])
feature_names = vec.get_feature_names()
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
uniq_feature_total = list(set(feature_total))
def re_search(key):
"""
Present all features with specific re pattern
"""
result = []
my_reg = '' + key
for item in uniq_feature_total:
if re.compile(my_reg, re.IGNORECASE).search(item) != None:
result.append(item)
return result
re_search('laundry') | code |
1005908/cell_5 | [
"text_plain_output_1.png"
] | import itertools as it
import pandas as pd
train = pd.read_json('../input/train.json')
train['listing_id'] = train['listing_id'].apply(str)
feature_total = []
train['features'].apply(lambda x: feature_total.append(x))
feature_total = list(it.chain.from_iterable(feature_total))
len(feature_total) | code |
73095165/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | Project | code |
33120729/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3
miss_bool = df['degree_C3'].isnull()
df.loc[miss_bool, 'degree_C3'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c3[x])
df['degree_C3'].isnull().sum()
mean_moisture_9 = df.groupby('SSM').mean()['moisture_9']
mean_moisture_9 | code |
33120729/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.describe() | code |
33120729/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
def secSinceNoon(datTimStr):
    """Return the minutes elapsed since midnight for a timestamp string.

    NOTE(review): despite the name, the result is *minutes since midnight*
    (total seconds of the clock time divided by 60), not seconds and not
    relative to noon. Kept as-is because callers depend on this value.
    """
    clock = pd.to_datetime(datTimStr).time()
    total_seconds = clock.hour * 3600 + clock.minute * 60 + clock.second
    return total_seconds / 60.0
df['SSM'] = df['timestamp'].apply(secSinceNoon)
df['SSM'] | code |
33120729/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
print(df_training.head(3))
print(df_testing.head(3)) | code |
33120729/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Load the train and test splits of the energy dataset.
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
# Tag each row with its origin so the combined frame can be split back later.
df_training['source'] = 'train'
df_testing['source'] = 'test'
# Stack train and test into one frame for joint preprocessing.
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)  # missing-value count per column
df.apply(lambda x: len(x.unique()))  # cardinality of each column
corr = df.corr().round(2)  # pairwise correlations of numeric columns
corr
# Group-mean imputation: fill missing 'degree_C1' with the mean of rows that
# share the same 'SSM' value (SSM appears to be a time-of-day key computed
# earlier -- TODO confirm against the notebook's other cells).
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()  # re-check remaining NaNs after the fill
# Same group-mean imputation, applied to 'degree_C3'.
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3
miss_bool = df['degree_C3'].isnull()
df.loc[miss_bool, 'degree_C3'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c3[x])
df['degree_C3'].isnull().sum() | code |
33120729/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
df_training.head() | code |
33120729/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False) | code |
33120729/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum()
mean_deg_c3 = df.groupby('SSM').mean()['degree_C3']
mean_deg_c3 | code |
33120729/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1
miss_bool = df['degree_C1'].isnull()
df.loc[miss_bool, 'degree_C1'] = df.loc[miss_bool, 'SSM'].apply(lambda x: mean_deg_c1[x])
df['degree_C1'].isnull().sum() | code |
33120729/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df['timestamp'] | code |
33120729/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
plt.figure(figsize=(10, 8))
sns.boxplot(x=df['luminousity'], y=df['WattHour'])
plt.show() | code |
33120729/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr | code |
33120729/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_testing.head() | code |
33120729/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique()))
corr = df.corr().round(2)
corr
mean_deg_c1 = df.groupby('SSM').mean()['degree_C1']
mean_deg_c1 | code |
33120729/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
df.apply(lambda x: len(x.unique())) | code |
33120729/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df['SSM'].value_counts() | code |
33120729/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df
df.isnull().sum().sort_values(ascending=False)
plt.figure(figsize=(10, 8))
sns.heatmap(df.isnull(), cbar=False, yticklabels=False, cmap='viridis')
plt.show() | code |
33120729/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
url = '../input/dataquest2020/energy_train.csv'
df_training = pd.read_csv(url)
url = '../input/dataquest2020/energy_test.csv'
df_testing = pd.read_csv(url)
df_training['source'] = 'train'
df_testing['source'] = 'test'
df = pd.concat([df_training, df_testing], axis=0, ignore_index=True)
df | code |
129001503/cell_4 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from icevision.all import * | code |
129001503/cell_2 | [
"text_plain_output_1.png"
] | # Download IceVision installation script:
!wget https://raw.githubusercontent.com/airctic/icevision/master/icevision_install.sh
# Choose installation target: cuda11 or cuda10 or cpu
!bash icevision_install.sh cuda10 master | code |
129001503/cell_1 | [
"text_plain_output_1.png"
] | !python --version | code |
129001503/cell_3 | [
"text_plain_output_1.png"
] | import IPython
import IPython
IPython.Application.instance().kernel.do_shutdown(True) | code |
129001503/cell_5 | [
"text_plain_output_1.png"
] | print("Let's begin!") | code |
129039718/cell_4 | [
"image_output_11.png",
"text_plain_output_100.png",
"text_plain_output_334.png",
"image_output_239.png",
"image_output_98.png",
"text_plain_output_445.png",
"image_output_337.png",
"text_plain_output_201.png",
"text_plain_output_261.png",
"image_output_121.png",
"image_output_180.png",
"image_... | import cv2
import glob
mask_directory = '/kaggle/input/mask-images/testMasks'
mask_names = glob.glob('/kaggle/input/mask-images/testMasks/*.tif')
mask_names = sorted(mask_names, key=lambda x: (len(x), x))
masks = [cv2.imread(mask, 0) for mask in mask_names]
for i in range(len(masks)):
print(i)
plt.imshow(masks[i], cmap='gray')
plt.pause(0.1) | code |
129039718/cell_3 | [
"text_plain_output_1.png"
] | import cv2
import glob
mask_directory = '/kaggle/input/mask-images/testMasks'
mask_names = glob.glob('/kaggle/input/mask-images/testMasks/*.tif')
mask_names = sorted(mask_names, key=lambda x: (len(x), x))
print(mask_names[0:6])
masks = [cv2.imread(mask, 0) for mask in mask_names]
print(len(masks)) | code |
90108947/cell_21 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pylab as plt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
train = df[:training_data_len]
valid = df[training_data_len:]
valid['Predictions'] = predictions
plt.figure(figsize=(16, 8))
plt.title('Model')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Closing Price', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
plt.show() | code |
90108947/cell_9 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import math
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
if i <= 61:
print(X_train)
print(Y_train)
print() | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.