Student: Xavier Weber
Mentors: Vladimir Tyan & Yida Wang
Student on the same project: Fanny Monori
Link to accomplished work:
- PR in the opencv_contrib repository: opencv_contrib/pull/2231
| user=juergenhoetzel | |
| # Page through the user's starred repositories (100 per page), printing one | |
| # "owner/name" per line. jq -e exits with status 4 when it produced no valid | |
| # output (an empty page), which PIPESTATUS[1] uses as the stop condition. | |
| # ${page:-1} starts at page 1 when $page is unset; let page++ advances it. | |
| while curl -s "https://api.github.com/users/$user/starred?per_page=100&page=${page:-1}" \ | |
| |jq -r -e '.[].full_name' && [[ ${PIPESTATUS[1]} != 4 ]]; do | |
| let page++ | |
| done |
| # ================================================================== | |
| # module list | |
| # ------------------------------------------------------------------ | |
| # darknet latest (git) | |
| # torch latest (git) | |
| # python 3.8 (apt) | |
| # pytorch latest (pip) | |
| # onnx latest (pip) | |
| # theano latest (git) | |
| # tensorflow latest (pip) |
| import streamlit as st | |
| import os | |
| import sys | |
| import importlib.util | |
# Resolve the target folder: the first command-line argument when one is
# supplied, otherwise the directory the script was launched from. Either
# way the result is normalized to an absolute path.
folder = os.path.abspath(sys.argv[1] if len(sys.argv) > 1 else os.getcwd())
| """ | |
| Example of a Streamlit app for an interactive Prodigy dataset viewer that also lets you | |
| run simple training experiments for NER and text classification. | |
| Requires the Prodigy annotation tool to be installed: https://prodi.gy | |
| See here for details on Streamlit: https://streamlit.io. | |
| """ | |
| import streamlit as st | |
| from prodigy.components.db import connect | |
| from prodigy.models.ner import EntityRecognizer, merge_spans, guess_batch_size |
Student: Xavier Weber
Mentors: Vladimir Tyan & Yida Wang
Student on the same project: Fanny Monori
Link to accomplished work:
| import base64 | |
| import numpy as np | |
| from pycocotools import _mask as coco_mask | |
| import typing as t | |
| import zlib | |
| def encode_binary_mask(mask: np.ndarray) -> t.Text: | |
| """Converts a binary mask into OID challenge encoding ascii text.""" | |
| # check input mask -- | |
| if mask.dtype != np.bool: |
def frames_to_TC (frames):
    """Convert a frame count at 24 fps into an 'HH:MM:SS:FF' timecode string."""
    frames_per_hour = 86400    # 24 fps * 60 s * 60 min
    frames_per_minute = 1440   # 24 fps * 60 s
    hh = int(frames / frames_per_hour)
    mm = int(frames / frames_per_minute) % 60
    ss = int((frames % frames_per_minute) / 24)
    ff = frames % frames_per_minute % 24
    return f"{hh:02d}:{mm:02d}:{ss:02d}:{ff:02d}"
| # Breakdown of the steps above: | |
| # Hours: Divide frames by 86400 (# of frames in an hour at 24fps). Round down to nearest integer. |
| import warnings | |
| from skimage.measure import compare_ssim | |
| from skimage.transform import resize | |
| from scipy.stats import wasserstein_distance | |
| from scipy.misc import imsave | |
| from scipy.ndimage import imread | |
| import numpy as np | |
| import cv2 | |
| ## |
import os

# Rename every `<Any_name>_2x.png`-style file in the current directory to
# `<Any_name>@2x.png` (the `@2x` suffix convention for retina assets).
# A plain loop replaces the original side-effect-only list comprehension,
# and files without a `_2x` substring are skipped instead of being passed
# to os.rename as a no-op rename onto themselves.
for name in os.listdir('.'):
    if name.startswith('.') or '_2x' not in name:
        continue  # hidden files and files that need no renaming
    os.rename(name, name.replace('_2x', '@2x'))
| Updated 2025-01-17 thanks to Yemster's comment. | |
| This should work on any architecture of Amazon Linux 2. | |
| (_Although not tested, it should also work on Amazon Linux 2023._) | |
| **Prereq** | |
| - visit https://johnvansickle.com/ffmpeg/ to grab the link to the relevant tarball for your specific server architecture. | |
| - Use `uname -a` to find out your arch if unknown | |
| ### TL;DR |