Note: Great refresher/glossary on probability/statistics and related topics here
| Notation | Definition |
|---|---|
| X | Random variable |
| P(X) | Probability distribution over random variable X |
| X ~ P(X) | Random variable X follows (~) the probability distribution P(X) * |
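Read concretely (an illustrative sketch using torch.distributions, not part of the original notes): define P(X), then draw X from it:

```python
import torch

P = torch.distributions.Normal(0.0, 1.0)  # P(X): here, a standard normal distribution
X = P.sample((5,))                        # X ~ P(X): five draws of the random variable
```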
```python
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d

def scatter(X, Y, c=None, ax=None):
    # Scatter plot of torch tensors, with optional per-point colours
    ax = plt.axes() if ax is None else ax
    ax.scatter(X.numpy(), Y.numpy(), c=None if c is None else c.numpy())
    return ax

def contour(X, Y, Z, levels=None, ax=None):
    ax = plt.axes() if ax is None else ax
    # Completion assumed from the signature: contour lines of Z over the (X, Y) grid
    ax.contour(X.numpy(), Y.numpy(), Z.numpy(), levels=levels)
    return ax
```
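A quick usage sketch (the data here is invented for illustration); the helpers take torch tensors and handle the NumPy conversion themselves:

```python
import torch

X = torch.linspace(-3, 3, 100)
Y = torch.sin(X) + 0.1 * torch.randn(100)
scatter(X, Y)
plt.show()
```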
```python
import os

import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image

class Encoder(nn.Module):
```
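A typical VAE encoder body for the class above, as a hedged sketch (layer sizes are assumptions for MNIST-scale data, not taken from the source):

```python
from torch import nn
from torch.nn import functional as F

class Encoder(nn.Module):
    # Maps a flattened input to the mean and log-variance of the approximate posterior q(z|x)
    def __init__(self, input_size=784, hidden_size=400, latent_size=20):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc_mu = nn.Linear(hidden_size, latent_size)
        self.fc_logvar = nn.Linear(hidden_size, latent_size)

    def forward(self, x):
        h = F.relu(self.fc1(x))
        return self.fc_mu(h), self.fc_logvar(h)
```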
| """ | |
| Introduction to Monte Carlo Tree Search | |
| http://jeffbradberry.com/posts/2015/09/intro-to-monte-carlo-tree-search/ | |
| """ | |
| from copy import deepcopy | |
| import datetime | |
| from math import log, sqrt | |
| from random import choice |
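The heart of the selection phase in the linked post is the UCB1 rule; a minimal sketch (the function name and the value of C are illustrative):

```python
from math import log, sqrt

def ucb1(wins, plays, parent_plays, C=1.4):
    # Exploitation (observed win rate) plus an exploration bonus that
    # shrinks as this node is visited more often relative to its parent
    return wins / plays + C * sqrt(log(parent_plays) / plays)
```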
| """ | |
| The Annotated Transformer | |
| http://nlp.seas.harvard.edu/2018/04/03/attention.html | |
| Note: Only includes basic example, not real world example or multi-GPU training | |
| """ | |
| import numpy as np | |
| import torch | |
| import torch.nn as nn |
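The post's central operation is scaled dot-product attention, softmax(QK^T / sqrt(d_k))V; a sketch along the lines of its definition:

```python
import math
import torch
import torch.nn.functional as F

def attention(query, key, value, mask=None):
    # Scores are dot products of queries and keys, scaled by sqrt(d_k)
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    # Output is the attention-weighted combination of the values
    return torch.matmul(p_attn, value), p_attn
```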
```python
import torch

def cast(cuda):
    # Returns a function that moves tensors to the GPU when cuda is True,
    # and the identity function otherwise
    if cuda:
        return lambda x: x.cuda()
    else:
        return lambda x: x
```
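Usage is then device-agnostic (a minimal illustration):

```python
move = cast(torch.cuda.is_available())
x = move(torch.zeros(4, 4))  # on the GPU if available, unchanged otherwise
```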
```python
# Collection of LSTM cells (including forget gates)
# https://en.wikipedia.org/w/index.php?title=Long_short-term_memory&oldid=784163987
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from torch.autograd import Variable
```
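For reference, one step of the standard forget-gate LSTM cell from the linked article, as a hedged sketch (parameter names and shapes are assumptions: W is (4h, input), U is (4h, h), b is (4h,)):

```python
import torch

def lstm_step(x, h, c, W, U, b):
    # gates = W x + U h + b, split into input (i), forget (f), output (o) and candidate (g)
    i, f, o, g = (x @ W.t() + h @ U.t() + b).chunk(4, dim=-1)
    i, f, o, g = i.sigmoid(), f.sigmoid(), o.sigmoid(), g.tanh()
    c = f * c + i * g   # forget part of the old state, write the gated candidate
    h = o * c.tanh()    # expose the gated hidden state
    return h, c
```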
```lua
--[[
-- Using Perlin Noise to Generate 2D Terrain and Water
-- http://gpfault.net/posts/perlin-noise.txt.html
--]]
local image = require 'image'

-- Fade function
local fade = function(t)
  -- Provides continuous higher-order derivatives for smoothness (this specifically is in the class of sigmoid functions)
  return t * t * t * (t * (t * 6 - 15) + 10)  -- Perlin's quintic: 6t^5 - 15t^4 + 10t^3
end
```
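A quick SymPy check of that smoothness claim: the quintic's first and second derivatives both vanish at t = 0 and t = 1, so blended gradients join smoothly at cell boundaries:

```python
import sympy as sp

t = sp.symbols('t')
fade = 6*t**5 - 15*t**4 + 10*t**3
for n in (1, 2):
    d = sp.diff(fade, t, n)
    print(d.subs(t, 0), d.subs(t, 1))  # prints 0 0 for both derivatives
```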
```lua
--[[
-- Random walks down Wall Street, Stochastic Processes in Python
-- http://www.turingfinance.com/random-walks-down-wall-street-stochastic-processes-in-python/
--]]
local gnuplot = require 'gnuplot'

local model_parameters = {
  all_s0 = 1000,  -- Starting asset value
  all_time = 800, -- Amount of time to simulate for
```
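One of the processes the linked post simulates with such parameters is geometric Brownian motion; a compact sketch (the drift and volatility values here are assumptions):

```python
import numpy as np

def geometric_brownian_motion(s0=1000, time=800, mu=0.05, sigma=0.2, dt=1/252):
    # Log-returns are i.i.d. normal: (mu - sigma^2/2) dt + sigma sqrt(dt) N(0, 1)
    log_returns = np.random.normal((mu - 0.5 * sigma**2) * dt,
                                   sigma * np.sqrt(dt), time)
    return s0 * np.exp(np.cumsum(log_returns))
```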
```lua
--[[
-- Gaussian Processes for Dummies
-- https://katbailey.github.io/post/gaussian-processes-for-dummies/
-- Note 1: The Cholesky decomposition requires positive-definite matrices, hence the addition of a small value to the diagonal (prevents zeros along the diagonal)
-- Note 2: This can also be thought of as adding a little noise to the observations
--]]
local gnuplot = require 'gnuplot'

-- Test data
```
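The two notes in one picture: add jitter to the kernel matrix's diagonal, then use its Cholesky factor to draw correlated samples (a sketch; `kernel` stands in for whatever covariance function you supply):

```python
import numpy as np

def sample_gp_prior(X, kernel, n_samples=3, jitter=1e-6):
    # Jitter keeps K positive-definite (equivalently, assumes slightly noisy observations)
    K = kernel(X, X) + jitter * np.eye(len(X))
    L = np.linalg.cholesky(K)
    # If z ~ N(0, I) then L z ~ N(0, K)
    return L @ np.random.randn(len(X), n_samples)
```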