First of all, you can of course use the same email address on different sites; for example, my GitHub, GitLab, and Bitbucket accounts are all monkeysuzie[at]gmail.com. In that case there is no need to worry about key conflicts, because what these sites use to identify you on push and pull is the email address. For example, on my Windows machine I have two accounts, one GitLab and one GitHub (both using the same id_rsa):
Host github
    HostName github.com
    Port 22
Host gitlab.zjut.com
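For reference, a complete ~/.ssh/config for this kind of setup might look like the sketch below. The gitlab.zjut.com host comes from the text above; the User and IdentityFile lines are illustrative additions that make explicit that both hosts share the same key pair (adjust paths to your environment):

# Minimal sketch: two git hosts sharing one key pair, as described above.
# User and IdentityFile values are assumptions, not from the original text.
Host github
    HostName github.com
    Port 22
    User git
    IdentityFile ~/.ssh/id_rsa

Host gitlab.zjut.com
    HostName gitlab.zjut.com
    Port 22
    User git
    IdentityFile ~/.ssh/id_rsa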
import numpy as np

# Method of a cyclical-learning-rate callback; self holds the schedule's
# hyper-parameters (base_lr, max_lr, step_size, scale_fn, scale_mode).
def clr(self):
    # Which cycle we are in: a full cycle spans 2 * step_size iterations.
    cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
    # Position within the current cycle; x = 0 at the peak, 1 at the trough.
    x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
    if self.scale_mode == 'cycle':
        return self.base_lr + (self.max_lr - self.base_lr) * \
            np.maximum(0, (1 - x)) * self.scale_fn(cycle)
    else:
        return self.base_lr + (self.max_lr - self.base_lr) * \
            np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)
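To see what this method computes, here is a minimal standalone sketch of the same triangular schedule with scale_fn fixed to 1 (the plain "triangular" policy); the base_lr, max_lr, and step_size values are chosen arbitrarily for illustration:

import numpy as np

def triangular_clr(iteration, base_lr=0.001, max_lr=0.006, step_size=2000):
    # Same formula as clr() above with scale_fn(cycle) == 1.
    cycle = np.floor(1 + iteration / (2 * step_size))
    x = np.abs(iteration / step_size - 2 * cycle + 1)
    return base_lr + (max_lr - base_lr) * np.maximum(0, 1 - x)

# The rate climbs linearly from base_lr to max_lr over step_size iterations,
# then descends back: 0 -> 0.001, 2000 -> 0.006, 4000 -> 0.001, ...
for it in (0, 1000, 2000, 3000, 4000):
    print(it, triangular_clr(it))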
import math
from keras.callbacks import LearningRateScheduler

def step_decay(epoch):
    # Halve the learning rate every 10 epochs, starting from 0.1.
    initial_lrate = 0.1
    drop = 0.5
    epochs_drop = 10.0
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate

# ...
lrate = LearningRateScheduler(step_decay)
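The callback is then passed to fit(), where it overrides the optimizer's learning rate at the start of each epoch. In this usage sketch, model, X, and y are placeholders for whatever model and training data you have; they are not part of the original snippet:

# Hypothetical usage: model, X, and y are assumed to exist already.
history = model.fit(X, y, epochs=50, batch_size=32, callbacks=[lrate])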
from keras.optimizers import SGD

# Compile model with time-based learning rate decay.
epochs = 50
learning_rate = 0.1
decay_rate = learning_rate / epochs
momentum = 0.8
sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
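With the decay argument, Keras' (legacy) SGD applies time-based decay per batch update: lr_t = lr / (1 + decay * t), where t is the update count. A quick sketch of the effective rate, assuming the values above and a hypothetical 100 updates per epoch:

# Effective learning rate after t batch updates under time-based decay.
def effective_lr(t, lr=0.1, decay=0.1 / 50):
    return lr / (1.0 + decay * t)

# After one epoch of 100 updates: 0.1 / (1 + 0.002 * 100) = 0.0833...
print(effective_lr(100))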
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.rnn_cell_impl import RNNCell, _linear

class LRUCell(RNNCell):
    """Lattice Recurrent Unit (LRU).
    This implementation is based on:
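The snippet is cut off here, but the general shape of such a cell follows TensorFlow 1.x's RNNCell contract: expose state_size and output_size, and compute the new output and state in call(). A minimal, hypothetical skeleton (not the LRU equations themselves), reusing the imports above:

class MinimalCell(RNNCell):
    """Hypothetical skeleton of a custom TF 1.x RNN cell, for orientation only."""

    def __init__(self, num_units, reuse=None):
        super(MinimalCell, self).__init__(_reuse=reuse)
        self._num_units = num_units

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def call(self, inputs, state):
        # One dense recurrence: h_t = tanh(W [x_t, h_{t-1}] + b).
        new_state = math_ops.tanh(_linear([inputs, state], self._num_units, True))
        return new_state, new_state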
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation

def fro_norm(w):
    """Frobenius norm."""
    return K.sqrt(K.sum(K.square(K.abs(w))))
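# Quick sanity check for fro_norm (illustrative, not part of the original
# snippet): the Frobenius norm of [[3, 4]] is sqrt(3^2 + 4^2) = 5.
# K.eval(fro_norm(K.variable(np.array([[3.0, 4.0]]))))  # -> 5.0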
def dot_product(x, kernel):
    """
    Wrapper for dot product operation, in order to be compatible with both
    Theano and TensorFlow.
    Args:
        x (): input
        kernel (): weights
    Returns:
    """
    if K.backend() == 'tensorflow':
        # TensorFlow needs matching ranks, so lift the kernel to 2-D
        # and drop the resulting singleton axis again.
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    else:
        return K.dot(x, kernel)
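As a usage sketch (shapes chosen purely for illustration, relying on the imports above): with timestep features x of shape (batch, time, dim) and a weight vector kernel of shape (dim,), dot_product returns one score per timestep, shape (batch, time):

x = K.variable(np.random.rand(2, 5, 8))  # (batch=2, time=5, dim=8)
kernel = K.variable(np.random.rand(8))   # (dim=8,)
scores = dot_product(x, kernel)          # one score per timestep
print(K.int_shape(scores))               # -> (2, 5)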
from keras import backend as K, initializers, regularizers, constraints
from keras.engine.topology import Layer
from keras.layers import LSTM

# dot_product is the same backend-compatible wrapper defined above.
class AttentionLSTM(LSTM):
    """LSTM with attention mechanism

    This is an LSTM incorporating an attention mechanism into its hidden states.
    Currently, the context vector calculated from the attended vector is fed
    into the model's internal states, closely following the model by Xu et al.
    (2016, Sec. 3.1.2), using a soft attention model following
    Bahdanau et al. (2014).

    The layer expects two inputs instead of the usual one:
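For intuition, the soft attention step described in the docstring (Bahdanau-style additive attention) can be sketched with backend ops. W_a, U_a, v_a, the attended sequence a, and the hidden state h are illustrative names, not the layer's actual weights:

def soft_attention_step(h, a, W_a, U_a, v_a):
    """Hypothetical additive-attention sketch: h is (batch, units),
    a is (batch, time, adim); returns a context vector (batch, adim)."""
    # Score each attended timestep: e_t = v^T tanh(W h + U a_t).
    e = K.dot(K.tanh(K.expand_dims(K.dot(h, W_a), 1) + K.dot(a, U_a)),
              K.expand_dims(v_a))                 # (batch, time, 1)
    alpha = K.softmax(K.squeeze(e, axis=-1))      # (batch, time)
    # Context: attention-weighted sum over the time axis.
    return K.batch_dot(alpha, a, axes=(1, 1))     # (batch, adim)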
| """ | |
| Implementation of pairwise ranking using scikit-learn LinearSVC | |
| Reference: "Large Margin Rank Boundaries for Ordinal Regression", R. Herbrich, | |
| T. Graepel, K. Obermayer. | |
| Authors: Fabian Pedregosa <fabian@fseoane.net> | |
| Alexandre Gramfort <alexandre.gramfort@inria.fr> | |
| """ |