description: python iterate a dataframe
loop over each row of a dataframe
import pandas
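A minimal runnable sketch of the loop, assuming a small throwaway DataFrame named df; iterrows yields an (index, Series) pair per row, and itertuples is the faster alternative when a namedtuple per row is enough.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# iterrows: each row comes back as a (label, Series) pair
for index, row in df.iterrows():
    print(index, row["a"], row["b"])

# itertuples: faster, each row comes back as a namedtuple
for row in df.itertuples():
    print(row.Index, row.a, row.b)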
description: python assign or set value if conditions match
import pandas
# filter down to the rows where both conditions match, then assign by index
tmp = data[data["a"] == 1]
tmp = tmp[tmp["b"] == 2]
indexes = tmp.index
data.loc[indexes, 'test'] = 888888
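A self-contained sketch of the same pattern, assuming a toy DataFrame named data with columns a and b; combining both conditions into one boolean mask lets .loc filter and assign in a single step.

import pandas as pd

data = pd.DataFrame({"a": [1, 1, 2], "b": [2, 3, 2]})

# set column 'test' to 888888 only on rows where both conditions hold
mask = (data["a"] == 1) & (data["b"] == 2)
data.loc[mask, "test"] = 888888
print(data)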
description: build small network example in python
def initialize_network(n_inputs, n_hidden, n_outputs):
seed(1)
# sample output: [{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614]}]
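A runnable sketch of the initializer these fragments come from, assuming each layer is a list of dicts holding a 'weights' list with one weight per input plus a trailing bias; with seed(1) and a 2-1-2 network, the single hidden neuron should reproduce the sample weights above.

from random import seed, random

def initialize_network(n_inputs, n_hidden, n_outputs):
    # one hidden layer and one output layer; each neuron keeps n_inputs + 1 weights (the last acts as the bias)
    hidden_layer = [{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_hidden)]
    output_layer = [{'weights': [random() for _ in range(n_hidden + 1)]} for _ in range(n_outputs)]
    return [hidden_layer, output_layer]

seed(1)
network = initialize_network(2, 1, 2)
for layer in network:
    print(layer)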
description: create simple MLP in Keras
from keras.models import Sequential
numpy.random.seed(7)
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X = dataset[:,0:8]
model = Sequential()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, Y, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
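The fragments above skip the imports, the label column, and the Dense layers. A runnable sketch, assuming the usual 8-feature Pima Indians diabetes CSV with the binary label in the ninth column; the 12-8-1 relu/relu/sigmoid stack is an illustrative choice.

import numpy
from keras.models import Sequential
from keras.layers import Dense

numpy.random.seed(7)
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, 0:8]   # 8 input features
Y = dataset[:, 8]     # binary label in the last column

model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, Y, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("accuracy: %.2f%%" % (scores[1] * 100))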
description: simple CNN example for MNIST dataset
import numpy
seed = 7
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train / 255
def baseline_model():
# build the model
# training log: Train on 60000 samples, validate on 10000 samples
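A minimal sketch of the kind of baseline CNN these fragments point at, assuming the older Keras API used elsewhere in these snippets (np_utils) and a channels-last 28x28x1 input; the single conv/pool/dense stack and its sizes are illustrative.

import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.utils import np_utils

numpy.random.seed(7)

(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to [samples][height][width][channels] and scale pixels to 0-1
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

def baseline_model():
    # one convolution, one pooling layer, then a small dense classifier
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = baseline_model()
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model.evaluate(X_test, y_test, verbose=0)
print("error: %.2f%%" % (100 - scores[1] * 100))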
description: a small example for reinforcement learning in python
import numpy as np
NUMBER_OF_STATES = 6  # the length of the 1 dimensional world
def build_q_table(number_of_states, actions):
def choose_action(state, q_table):
def get_env_feedback(Current_State, Current_Action):
def update_env(state, episode, step_counter):
def rl():
if __name__ == "__main__":
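A runnable sketch of tabular Q-learning on that 1-D world, assuming the agent starts at the left end and the only reward sits in the rightmost cell; epsilon, the learning rate, the discount factor, and the episode count are illustrative, and the update_env display helper is left out for brevity.

import numpy as np
import pandas as pd

NUMBER_OF_STATES = 6            # length of the 1-D world; the treasure is at the right end
ACTIONS = ['left', 'right']
EPSILON = 0.9                   # probability of acting greedily
ALPHA = 0.1                     # learning rate
GAMMA = 0.9                     # discount factor
MAX_EPISODES = 13

def build_q_table(number_of_states, actions):
    # one row per state, one column per action, all values start at zero
    return pd.DataFrame(np.zeros((number_of_states, len(actions))), columns=actions)

def choose_action(state, q_table):
    state_actions = q_table.iloc[state, :]
    # explore when the dice roll exceeds epsilon or when this state is still untouched
    if np.random.uniform() > EPSILON or (state_actions == 0).all():
        return np.random.choice(ACTIONS)
    return state_actions.idxmax()

def get_env_feedback(state, action):
    # returns (next_state, reward); stepping onto the last cell ends the episode
    if action == 'right':
        if state == NUMBER_OF_STATES - 2:
            return 'terminal', 1
        return state + 1, 0
    return max(0, state - 1), 0

def rl():
    q_table = build_q_table(NUMBER_OF_STATES, ACTIONS)
    for episode in range(MAX_EPISODES):
        state = 0
        while state != 'terminal':
            action = choose_action(state, q_table)
            next_state, reward = get_env_feedback(state, action)
            q_predict = q_table.loc[state, action]
            if next_state != 'terminal':
                q_target = reward + GAMMA * q_table.iloc[next_state, :].max()
            else:
                q_target = reward
            q_table.loc[state, action] += ALPHA * (q_target - q_predict)
            state = next_state
    return q_table

if __name__ == "__main__":
    print(rl())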
description: simple lstm-style recurrent network example in plain numpy
def sigmoid(x):
def sigmoid_output_to_derivative(output):
int2binary = {}
alpha = 0.1
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
for j in range(10000):
# training log: Error:[3.45638663]
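These fragments look like the plain-numpy recurrent network that learns 8-bit binary addition rather than a TensorFlow LSTM; a runnable sketch under that assumption, with the hidden size, learning rate, and iteration count as illustrative choices.

import copy
import numpy as np

np.random.seed(0)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_output_to_derivative(output):
    return output * (1 - output)

# lookup table from integers to their 8-bit binary representation
int2binary = {}
binary_dim = 8
largest_number = 2 ** binary_dim
binary = np.unpackbits(np.arange(largest_number, dtype=np.uint8).reshape(-1, 1), axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]

alpha = 0.1
input_dim = 2
hidden_dim = 16
output_dim = 1

# weight matrices initialised in [-1, 1)
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1

for j in range(10000):
    # sample a random addition problem a + b = c that cannot overflow 8 bits
    a_int = np.random.randint(largest_number // 2)
    b_int = np.random.randint(largest_number // 2)
    c_int = a_int + b_int
    a, b, c = int2binary[a_int], int2binary[b_int], int2binary[c_int]

    overall_error = 0
    layer_2_deltas = []
    layer_1_values = [np.zeros(hidden_dim)]

    # forward pass, least-significant bit first
    for position in range(binary_dim):
        X = np.array([[a[binary_dim - position - 1], b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T
        layer_1 = sigmoid(np.dot(X, synapse_0) + np.dot(layer_1_values[-1], synapse_h))
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))
        layer_2_error = y - layer_2
        layer_2_deltas.append(layer_2_error * sigmoid_output_to_derivative(layer_2))
        overall_error += np.abs(layer_2_error[0])
        layer_1_values.append(copy.deepcopy(layer_1))

    # backward pass through time
    future_layer_1_delta = np.zeros(hidden_dim)
    synapse_0_update = np.zeros_like(synapse_0)
    synapse_1_update = np.zeros_like(synapse_1)
    synapse_h_update = np.zeros_like(synapse_h)
    for position in range(binary_dim):
        X = np.array([[a[position], b[position]]])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]
        layer_2_delta = layer_2_deltas[-position - 1]
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)
        future_layer_1_delta = layer_1_delta

    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha

    if j % 1000 == 0:
        print("Error:" + str(overall_error))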
description: use LSTM to predict text, character by character, in python
import numpy
filename = "wonderland.txt"
chars = sorted(list(set(raw_text)))
# Total Characters: 147674
seq_length = 100
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model = Sequential()
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
model.fit(X, y, epochs=20, batch_size=128, callbacks=callbacks_list)
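A runnable sketch of the character-level LSTM these lines outline, assuming the older Keras API (np_utils, ModelCheckpoint) and a local copy of wonderland.txt; the 256-unit LSTM, dropout rate, and checkpoint-on-loss setup are illustrative.

import numpy
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

# load the text and build a character-to-integer mapping
filename = "wonderland.txt"
raw_text = open(filename, encoding="utf-8").read().lower()
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
n_chars = len(raw_text)
n_vocab = len(chars)

# cut the text into overlapping 100-character windows, each predicting the next character
seq_length = 100
dataX, dataY = [], []
for i in range(0, n_chars - seq_length):
    seq_in = raw_text[i:i + seq_length]
    seq_out = raw_text[i + seq_length]
    dataX.append([char_to_int[c] for c in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)

# reshape to [samples, time steps, features], scale, and one-hot encode the targets
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)

model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# checkpoint the weights whenever the training loss improves
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', save_best_only=True, mode='min')
callbacks_list = [checkpoint]

model.fit(X, y, epochs=20, batch_size=128, callbacks=callbacks_list)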
description: forward propagate input through a small network in python
from math import exp
def activate(weights, inputs):
def forward_propagate(network, row):
network = [[{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614]}],
# sample output: [0.6629970129852887, 0.7253160725279748]
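A runnable sketch of the activation and forward pass these stubs describe, assuming the last weight of each neuron is its bias and a sigmoid transfer function; the hidden layer reuses the weights shown above, while the output-layer weights here are illustrative.

from math import exp

def activate(weights, inputs):
    # weighted sum of inputs plus the bias (stored as the last weight)
    activation = weights[-1]
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation

def transfer(activation):
    # sigmoid transfer function
    return 1.0 / (1.0 + exp(-activation))

def forward_propagate(network, row):
    # feed the row through each layer, using each layer's outputs as the next layer's inputs
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            neuron['output'] = transfer(activate(neuron['weights'], inputs))
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# hidden layer from the fragment above; the output-layer weights are illustrative
network = [[{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614]}],
           [{'weights': [0.2550690257394217, 0.49543508709194095]},
            {'weights': [0.4494910647887381, 0.651592972722763]}]]
print(forward_propagate(network, [1, 0]))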
description: build Decision Tree from bank note dataset in python
from random import seed
def load_csv(filename):
def str_column_to_float(dataset, column):
def cross_validation_split(dataset, n_folds):
def accuracy_metric(actual, predicted):
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
def test_split(index, value, dataset):
def gini_index(groups, classes):
def get_split(dataset):
def to_terminal(group):
def split(node, max_depth, min_size, depth):
def build_tree(train, max_depth, min_size):
def predict(node, row):
def decision_tree(train, test, max_depth, min_size):
seed(1)
filename = 'data_banknote_authentication.csv'
for i in range(len(dataset[0])):
n_folds = 5
# sample output: Scores: [100.0, 100.0, 100.0, 100.0, 100.0]
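The pipeline above loads the banknote CSV and cross-validates the tree; a shorter runnable sketch of the core CART pieces it relies on (Gini impurity, exhaustive best-split search, recursive tree building, prediction) on a tiny inline dataset, with max_depth and min_size as illustrative settings.

def test_split(index, value, dataset):
    # partition rows by whether the chosen attribute is below the split value
    left = [row for row in dataset if row[index] < value]
    right = [row for row in dataset if row[index] >= value]
    return left, right

def gini_index(groups, classes):
    # weighted Gini impurity of a candidate split
    n_instances = float(sum(len(group) for group in groups))
    gini = 0.0
    for group in groups:
        size = float(len(group))
        if size == 0:
            continue
        score = 0.0
        for class_val in classes:
            p = [row[-1] for row in group].count(class_val) / size
            score += p * p
        gini += (1.0 - score) * (size / n_instances)
    return gini

def get_split(dataset):
    # exhaustively try every attribute/value pair and keep the lowest-Gini split
    class_values = list(set(row[-1] for row in dataset))
    b_index, b_value, b_score, b_groups = None, None, float('inf'), None
    for index in range(len(dataset[0]) - 1):
        for row in dataset:
            groups = test_split(index, row[index], dataset)
            gini = gini_index(groups, class_values)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}

def to_terminal(group):
    # majority class of the rows that reached this leaf
    outcomes = [row[-1] for row in group]
    return max(set(outcomes), key=outcomes.count)

def split(node, max_depth, min_size, depth):
    left, right = node['groups']
    del node['groups']
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    for side, group in (('left', left), ('right', right)):
        if len(group) <= min_size:
            node[side] = to_terminal(group)
        else:
            node[side] = get_split(group)
            split(node[side], max_depth, min_size, depth + 1)

def build_tree(train, max_depth, min_size):
    root = get_split(train)
    split(root, max_depth, min_size, 1)
    return root

def predict(node, row):
    # walk the tree until a leaf (a bare class label) is reached
    branch = node['left'] if row[node['index']] < node['value'] else node['right']
    return predict(branch, row) if isinstance(branch, dict) else branch

# tiny illustrative dataset: two features, binary class label in the last column
dataset = [[2.7, 2.5, 0], [1.4, 2.3, 0], [3.3, 4.4, 0], [1.3, 1.8, 0], [3.0, 3.0, 0],
           [7.6, 2.7, 1], [9.0, 3.3, 1], [7.4, 0.4, 1], [10.1, 3.1, 1], [6.6, 3.5, 1]]
tree = build_tree(dataset, max_depth=3, min_size=1)
for row in dataset:
    print('expected=%d, predicted=%d' % (row[-1], predict(tree, row)))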