Tvheadend runs as a service under a dedicated user, hts; its files and configuration are therefore stored under /home/hts.
Tvheadend is managed by Upstart, so the service is started, stopped, and restarted through Upstart:
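A minimal sketch, assuming the Upstart job is named tvheadend (the default for the Debian/Ubuntu packages):

sudo service tvheadend start
sudo service tvheadend stop
sudo service tvheadend restart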
try:
    pass  # Code that may raise an exception goes here
except Exception as ex:
    template = "An exception of type {0} occurred. Arguments:\n{1!r}"
    message = template.format(type(ex).__name__, ex.args)
    print(message)
    # Here you can do post-mortem analysis, present a GUI error message, etc.
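As a quick illustration of what the template prints, assuming the guarded code raises a ValueError (the int() call here is just an illustrative stand-in):

try:
    int("not a number")
except Exception as ex:
    template = "An exception of type {0} occurred. Arguments:\n{1!r}"
    print(template.format(type(ex).__name__, ex.args))
# Output:
# An exception of type ValueError occurred. Arguments:
# ("invalid literal for int() with base 10: 'not a number'",)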
#!/bin/bash
# Destination folder
mkdir -p output
# Extension to select. Videos must be in the current dir
extension=flv
for vid in *.$extension; do
    codec="$(ffprobe -v error -select_streams a:0 -show_entries stream=codec_name -print_format csv=p=0 "$vid")"
    case "$codec" in
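        # The original snippet is truncated at this point; the branches below are a
        # reconstructed completion (the codec names and container choices are assumptions):
        # stream-copy the audio into a container that suits the detected codec, with
        # Matroska audio (.mka) as a catch-all since it accepts nearly any codec.
        mp3) ext2=mp3 ;;
        aac) ext2=m4a ;;
        *)   ext2=mka ;;
    esac
    ffmpeg -i "$vid" -vn -acodec copy output/"${vid%.$extension}.$ext2"
done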
#!/bin/bash
# Destination folder
mkdir -p output/transcoded
# Extension to select for the loop. Videos must be in the current dir
extension=flv
for vid in *.$extension; do
    ffmpeg -i "$vid" -vn -acodec libmp3lame output/transcoded/"${vid%.$extension}.mp3"
done
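Unlike the codec-aware script above, this version always re-encodes the audio to MP3 with libmp3lame regardless of the source codec, which is simpler but lossy and slower than stream copying.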
{
  "@context": "https://www.schema.org",
  "@type": "JobPosting",
  "id": 8991,
  "title": "Placement - Business / IT Process and Project Management",
  "description": "Explore this unique opportunity to join a global power leader...",
  "datePosted": "2016-06-01",
  "hiringOrganization": {
    "id": 873,
    "name": "Cummins Inc."
  }
}
mvn -T 4 clean package -Pspark-1.6 -Phadoop-2.4 -Pyarn -Ppyspark -DskipTests -Dspark.version=1.6.0
#!/bin/sh
# Assumes the MySQL connector JAR in /zeppelin/interpreter/jdbc and Spark in /usr/spark.
# local[8] assumes 8 cores.
SPARK_CLASSPATH=/zeppelin/interpreter/jdbc/mysql-connector-java-5.1.35.jar /usr/spark/bin/spark-shell --master local[8]
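Note that SPARK_CLASSPATH is deprecated in Spark 1.x; passing the connector via --driver-class-path (or --jars) is the preferred route, so treat the line above as the quick-and-dirty variant.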
val sqlContext = new org.apache.spark.sql.SQLContext(sc) // optional; spark-shell already provides one
val df = sqlContext.load("jdbc", Map(
  "url" -> "jdbc:mysql://<ip.address.your.db>/<database>?user=<username>&password=<pwd>",
  "dbtable" -> "<tablename>"))
df.select("<col1>", "<col2>", "<col3>").save("</path/to/parquet/file.parquet>", "parquet")
// Alternatively, to save all the columns:
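df.save("</path/to/parquet/file.parquet>", "parquet") // same call, just without the select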