Standard escape codes are prefixed with Escape:
- Ctrl-Key: ^[
- Octal: \033
- Unicode: \u001b
- Hexadecimal: \x1B
- Decimal: 27
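
A quick Python check that all of these notations denote the same ESC byte (decimal 27), plus a small ANSI color example:

# All four literals below are the same single ESC character.
esc_octal   = "\033"
esc_unicode = "\u001b"
esc_hex     = "\x1B"
esc_decimal = chr(27)
assert esc_octal == esc_unicode == esc_hex == esc_decimal

# Example: ESC followed by an SGR sequence prints red text, then resets.
print("\x1B[31mred text\x1B[0m")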
import torch
import torch.nn.functional as F

def compute_laplace_dst(nx, ny, dx, dy, arr_kwargs):
    """Discrete sine transform of the 2D centered discrete laplacian
    operator."""
    x, y = torch.meshgrid(torch.arange(1, nx-1, **arr_kwargs),
                          torch.arange(1, ny-1, **arr_kwargs),
                          indexing='ij')
    return (2*(torch.cos(torch.pi/(nx-1)*x) - 1)/dx**2
            + 2*(torch.cos(torch.pi/(ny-1)*y) - 1)/dy**2)
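
A usage sketch; the grid size, spacing, and tensor kwargs below are illustrative assumptions, not values from the original:

# Illustrative parameters (assumed): a 65x65 grid on the unit square.
nx, ny = 65, 65
dx = dy = 1.0 / (nx - 1)
arr_kwargs = {'dtype': torch.float64, 'device': 'cpu'}

eigvals = compute_laplace_dst(nx, ny, dx, dy, arr_kwargs)
print(eigvals.shape)  # torch.Size([63, 63]): one eigenvalue per interior grid point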
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <windows.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
//#include <libavutil/frame.h>
#include <SDL2/SDL.h>
model.zero_grad()                                   # Reset gradients tensors
for i, (inputs, labels) in enumerate(training_set):
    predictions = model(inputs)                     # Forward pass
    loss = loss_function(predictions, labels)       # Compute loss function
    loss = loss / accumulation_steps                # Normalize our loss (if averaged)
    loss.backward()                                 # Backward pass
    if (i+1) % accumulation_steps == 0:             # Wait for several backward steps
        optimizer.step()                            # Now we can do an optimizer step
        model.zero_grad()                           # Reset gradients tensors
        if (i+1) % evaluation_steps == 0:           # Evaluate the model when we...
            evaluate_model()                        # ...have no gradients accumulated
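
A minimal setup sketch for the accumulation loop above; the model, optimizer, loss, step counts, and dummy data are illustrative assumptions, and evaluate_model is a stand-in for whatever evaluation routine is actually used. The effective batch size is the mini-batch size times accumulation_steps:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)                  # assumed toy model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_function = nn.CrossEntropyLoss()
accumulation_steps = 4                    # sum gradients over 4 mini-batches per step
evaluation_steps = 100                    # evaluate every 100 mini-batches

# Dummy data: with mini-batches of 8 samples, the effective batch size is 8 * 4 = 32.
training_set = [(torch.randn(8, 10), torch.randint(0, 2, (8,)))
                for _ in range(400)]

def evaluate_model():
    pass                                  # placeholder for the real evaluation hook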