= Using motion recognition on the Arduino =

This tutorial pairs two Arduino Nano 33 BLE Sense boards. The sender (BLE central) runs a motion-classification model trained with [https://www.edgeimpulse.com/ Edge Impulse] on its onboard accelerometer and writes the index of the detected gesture to a BLE characteristic. The receiver (BLE peripheral) advertises that characteristic and reacts to each new value with its RGB LED, and optionally with a servo, DC motor or other outputs. Flash the sender sketch to one board and one of the receiver sketches to the other.

== Sender (Central) ==
Replace the first <code>#include</code> line with the Arduino library exported from your own trained Edge Impulse model.
<syntaxhighlight lang=c style="border:1px dashed black; max-width: 950px;">
#include <Interfaces-Sense_inferencing.h> // Change to your model library
#include <ArduinoBLE.h>
#include <Arduino_APDS9960.h>
#include <Arduino_LSM9DS1.h>

#define CONVERT_G_TO_MS2    9.80665f
#define MAX_ACCEPTED_RANGE  2.0f

static bool debug_nn = false;

const char* deviceServiceUuid = "19b10000-e8f2-537e-4f6c-d104768a1214";
const char* deviceServiceCharacteristicUuid = "19b10001-e8f2-537e-4f6c-d104768a1214";

int gesture = -1;
int oldGestureValue = -1;

void setup() {
  Serial.begin(9600);

  pinMode(LEDR, OUTPUT);
  pinMode(LEDG, OUTPUT);
  pinMode(LEDB, OUTPUT);
  pinMode(LED_BUILTIN, OUTPUT);

  // Red LED on while disconnected (the RGB LED is active low)
  digitalWrite(LEDR, LOW);
  digitalWrite(LEDG, HIGH);
  digitalWrite(LEDB, HIGH);
  digitalWrite(LED_BUILTIN, LOW);

  if (!IMU.begin()) {
    Serial.println("- Failed to initialize accelerometer!");
  }
  else {
    Serial.println("- Accelerometer initialized!");
  }

  if (EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME != 3) {
    Serial.println("EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME should be equal to 3 (x,y,z)");
    return;
  }

  if (!BLE.begin()) {
    Serial.println("- Starting BLE module failed!");
    while (1);
  }

  BLE.setLocalName("Nano 33 BLE Sense (Central)");
  BLE.advertise();
}

void loop() {
  connectToPeripheral();
}

void connectToPeripheral() {
  BLEDevice peripheral;

  Serial.println("- Discovering peripheral device...");

  do {
    BLE.scanForUuid(deviceServiceUuid);
    peripheral = BLE.available();
  } while (!peripheral);

  if (peripheral) {
    Serial.println("* Peripheral device found!");
    Serial.print("* Device MAC address: ");
    Serial.println(peripheral.address());
    Serial.print("* Device name: ");
    Serial.println(peripheral.localName());
    Serial.print("* Advertised service UUID: ");
    Serial.println(peripheral.advertisedServiceUuid());
    Serial.println(" ");
    BLE.stopScan();
    controlPeripheral(peripheral);
  }
}

void controlPeripheral(BLEDevice peripheral) {
  Serial.println("- Connecting to peripheral device...");

  if (peripheral.connect()) {
    Serial.println("* Connected to peripheral device!");
    Serial.println(" ");
    // Change LED to green
    digitalWrite(LEDR, HIGH);
    digitalWrite(LEDG, LOW);
    digitalWrite(LEDB, HIGH);
    digitalWrite(LED_BUILTIN, LOW);
  } else {
    Serial.println("* Connection to peripheral device failed!");
    Serial.println(" ");
    return;
  }

  Serial.println("- Discovering peripheral device attributes...");
  if (peripheral.discoverAttributes()) {
    Serial.println("* Peripheral device attributes discovered!");
    Serial.println(" ");
  } else {
    Serial.println("* Peripheral device attributes discovery failed!");
    Serial.println(" ");
    peripheral.disconnect();
    return;
  }

  BLECharacteristic gestureCharacteristic = peripheral.characteristic(deviceServiceCharacteristicUuid);

  if (!gestureCharacteristic) {
    Serial.println("* Peripheral device does not have gesture_type characteristic!");
    peripheral.disconnect();
    return;
  } else if (!gestureCharacteristic.canWrite()) {
    Serial.println("* Peripheral does not have a writable gesture_type characteristic!");
    peripheral.disconnect();
    return;
  }

  while (peripheral.connected()) {
    gesture = gestureDetection();

    if (oldGestureValue != gesture) {
      oldGestureValue = gesture;
      Serial.print("- Writing value to gesture characteristic: ");
      Serial.println(gesture);
      gestureCharacteristic.writeValue((byte)gesture);
      Serial.println("- Writing value to gesture characteristic done!");
      Serial.println(" ");
    }
  }
  Serial.println("- Peripheral device disconnected!");
  // Change LED back to red
  digitalWrite(LEDR, LOW);
  digitalWrite(LEDG, HIGH);
  digitalWrite(LEDB, HIGH);
  digitalWrite(LED_BUILTIN, LOW);
}

int gestureDetection() {
  int gesture_index = -1; // stays -1 if no label passes the confidence threshold
  Serial.println("- Start inferencing in 2 seconds...");
  delay(2000);
  Serial.println("- Getting accelerometer sample...");

  // Allocate a buffer here for the values we'll read from the IMU
  float buffer[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = { 0 };

  for (size_t ix = 0; ix < EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE; ix += 3) {
    // Determine the next tick (and then sleep later)
    uint64_t next_tick = micros() + (EI_CLASSIFIER_INTERVAL_MS * 1000);

    IMU.readAcceleration(buffer[ix], buffer[ix + 1], buffer[ix + 2]);

    // Clamp readings to the accepted range
    for (int i = 0; i < 3; i++) {
      if (fabs(buffer[ix + i]) > MAX_ACCEPTED_RANGE) {
        buffer[ix + i] = ei_get_sign(buffer[ix + i]) * MAX_ACCEPTED_RANGE;
      }
    }

    // Convert from g to m/s^2
    buffer[ix + 0] *= CONVERT_G_TO_MS2;
    buffer[ix + 1] *= CONVERT_G_TO_MS2;
    buffer[ix + 2] *= CONVERT_G_TO_MS2;

    delayMicroseconds(next_tick - micros());
  }

  // Turn the raw buffer into a signal which we can then classify
  signal_t signal;
  int err = numpy::signal_from_buffer(buffer, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, &signal);
  if (err != 0) {
    Serial.println("Failed to create a signal!");
    return -1;
  }

  // Run the classifier
  ei_impulse_result_t result = { 0 };

  err = run_classifier(&signal, &result, debug_nn);
  if (err != EI_IMPULSE_OK) {
    Serial.println("Failed to run classifier!");
    return -1;
  }

  // Report any label with confidence above 0.8 (the last one wins)
  for (int ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
    if (result.classification[ix].value > 0.8) {
      gesture_index = ix;
      Serial.println(" ");
      Serial.print("* Detected gesture: ");
      Serial.print(result.classification[ix].label);
      Serial.print(" with index number: ");
      Serial.println(ix);
      Serial.println(" ");
    }
  }
  return gesture_index;
}

float ei_get_sign(float number) {
  return (number >= 0.0) ? 1.0 : -1.0;
}
</syntaxhighlight>

== Receiver (Peripheral) ==
The receiver maps each value written to the gesture characteristic onto an LED colour. The example assumes three gestures (SLEEP, UPDOWN, WAVE); the index numbers correspond to the label order of your trained model.

<syntaxhighlight lang=c style="border:1px dashed black; max-width: 950px;">
#include <ArduinoBLE.h>

enum {
  GESTURE_NONE   = -1,
  GESTURE_SLEEP  = 0,
  GESTURE_UPDOWN = 1,
  GESTURE_WAVE   = 2,
};

const char* deviceServiceUuid = "19b10000-e8f2-537e-4f6c-d104768a1214";
const char* deviceServiceCharacteristicUuid = "19b10001-e8f2-537e-4f6c-d104768a1214";

int gesture = -1;

BLEService gestureService(deviceServiceUuid);
BLEByteCharacteristic gestureCharacteristic(deviceServiceCharacteristicUuid, BLERead | BLEWrite);

void setup() {
  Serial.begin(9600);

  pinMode(LEDR, OUTPUT);
  pinMode(LEDG, OUTPUT);
  pinMode(LEDB, OUTPUT);
  pinMode(LED_BUILTIN, OUTPUT);

  // All LEDs off (the RGB LED is active low)
  digitalWrite(LEDR, HIGH);
  digitalWrite(LEDG, HIGH);
  digitalWrite(LEDB, HIGH);
  digitalWrite(LED_BUILTIN, LOW);

  if (!BLE.begin()) {
    Serial.println("- Starting BLE module failed!");
    while (1);
  }

  BLE.setLocalName("Nano 33 BLE Sense (Peripheral)");
  BLE.setAdvertisedService(gestureService);
  gestureService.addCharacteristic(gestureCharacteristic);
  BLE.addService(gestureService);
  gestureCharacteristic.writeValue(-1); // a byte characteristic stores -1 as 255, which the default case below treats as "no gesture"
  BLE.advertise();
}

void loop() {
  BLEDevice central = BLE.central();
  Serial.println("- Discovering central device...");
  delay(500);

  if (central) {
    Serial.println("* Connected to central device!");
    Serial.print("* Device MAC address: ");
    Serial.println(central.address());
    Serial.println(" ");

    while (central.connected()) {
      if (gestureCharacteristic.written()) {
        gesture = gestureCharacteristic.value();
        writeGesture(gesture);
      }
    }

    Serial.println("* Disconnected from central device!");
  }
}

void writeGesture(int gesture) {
  Serial.println("- Gesture characteristic has changed!");

  switch (gesture) {
    case GESTURE_SLEEP:
      Serial.println("* Actual value: SLEEP (0) (red LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, LOW);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      break;
    case GESTURE_UPDOWN:
      Serial.println("* Actual value: UPDOWN (1) (green LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, LOW);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      break;
    case GESTURE_WAVE:
      Serial.println("* Actual value: WAVE (2) (blue LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, LOW);
      digitalWrite(LED_BUILTIN, LOW);
      break;
    default: // unknown gesture or none: all LEDs off
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      break;
  }
}
</syntaxhighlight>
== Connect some Outputs ==

=== Arduino BLE 33 Sense pinout ===
[[File:SRL-image-0.png | 900 px]]
=== Servo ===
A servo motor can be connected directly to a PWM-capable digital pin (check the pinout above). <br>
In the image below, the signal line (yellow wire) is connected to D2. <br>
Black goes to GND and red to +5V (beware: you need to solder a jumper pad on the bottom of the Arduino in order to activate the 5V pin). <br>

[[File:Ble servo.png | 600 px]]
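
Before wiring the servo into the BLE receiver, it can help to test it on its own. The sketch below is a minimal sweep test of our own (not part of the original tutorial code); it assumes the signal wire is on D2, as in the photo above.

<syntaxhighlight lang=c style="border:1px dashed black; max-width: 950px;">
#include <Servo.h>

Servo myservo; // create servo object to control the servo

void setup() {
  myservo.attach(2); // signal wire on D2, as in the photo above
}

void loop() {
  myservo.write(10);  // sweep to 10 degrees
  delay(1000);
  myservo.write(170); // sweep to 170 degrees
  delay(1000);
}
</syntaxhighlight>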

=== DC motor + other higher-voltage loads ===
Loads that draw more current or a higher voltage than an Arduino pin can supply (DC motors, fans, solenoids, LED strips, relays, etc.) are switched through a MOSFET and powered from an external power supply, as shown in the wiring pictures and schematic below.

[[File:Ble DCmotor external powersupply.png | 600 px]]

[[File:Ble MOSFET load external powersupply.png | 600 px]]

[[File:900px-MOSFET motor schem BLE33.jpg]]

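Here too you can verify the switching circuit before involving BLE. A minimal sketch of our own, assuming the MOSFET gate is driven from pin 3 (the pin the receiver sketch below uses for the DC motor):

<syntaxhighlight lang=c style="border:1px dashed black; max-width: 950px;">
// Minimal MOSFET switching test: toggles the load once per second.
// Assumes the MOSFET gate is driven from pin 3, as in the receiver sketch below.
const int gatePin = 3;

void setup() {
  pinMode(gatePin, OUTPUT);
}

void loop() {
  digitalWrite(gatePin, HIGH); // load on
  delay(1000);
  digitalWrite(gatePin, LOW);  // load off
  delay(1000);
}
</syntaxhighlight>
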
== Receiver (Peripheral) with servo motor, DC motor, etc. ==
This version extends the basic receiver: each gesture also sets a servo position and switches the loads on pins 3 and 4.

<syntaxhighlight lang=c style="border:1px dashed black; max-width: 950px;">
#include <ArduinoBLE.h>
#include <Servo.h>

enum {
  GESTURE_NONE   = -1,
  GESTURE_SLEEP  = 0,
  GESTURE_UPDOWN = 1,
  GESTURE_WAVE   = 2,
};

const char* deviceServiceUuid = "19b10000-e8f2-537e-4f6c-d104768a1214";
const char* deviceServiceCharacteristicUuid = "19b10001-e8f2-537e-4f6c-d104768a1214";

int gesture = -1;

BLEService gestureService(deviceServiceUuid);
BLEByteCharacteristic gestureCharacteristic(deviceServiceCharacteristicUuid, BLERead | BLEWrite);

Servo myservo; // create servo object to control a servo
// twelve servo objects can be created on most boards

void setup() {
  Serial.begin(9600);

  pinMode(LEDR, OUTPUT);
  pinMode(LEDG, OUTPUT);
  pinMode(LEDB, OUTPUT);
  pinMode(LED_BUILTIN, OUTPUT);

  myservo.attach(2);  // attaches the servo on pin 2 to the servo object
  pinMode(3, OUTPUT); // pin 3 switches the DC motor (or any other MOSFET/relay-driven load: fan, solenoid, vibration motor, LED strip, etc.)
  pinMode(4, OUTPUT); // pin 4 switches the fan (same idea as pin 3)

  digitalWrite(LEDR, HIGH);
  digitalWrite(LEDG, HIGH);
  digitalWrite(LEDB, HIGH);
  digitalWrite(LED_BUILTIN, LOW);

  if (!BLE.begin()) {
    Serial.println("- Starting BLE module failed!");
    while (1);
  }

  BLE.setLocalName("Nano 33 BLE Sense (Peripheral)");
  BLE.setAdvertisedService(gestureService);
  gestureService.addCharacteristic(gestureCharacteristic);
  BLE.addService(gestureService);
  gestureCharacteristic.writeValue(-1);
  BLE.advertise();
}

void loop() {
  BLEDevice central = BLE.central();
  Serial.println("- Discovering central device...");
  delay(500);

  if (central) {
    Serial.println("* Connected to central device!");
    Serial.print("* Device MAC address: ");
    Serial.println(central.address());
    Serial.println(" ");

    while (central.connected()) {
      if (gestureCharacteristic.written()) {
        gesture = gestureCharacteristic.value();
        writeGesture(gesture);
      }
    }

    Serial.println("* Disconnected from central device!");
  }
}

void writeGesture(int gesture) {
  Serial.println("- Gesture characteristic has changed!");

  switch (gesture) {
    case GESTURE_SLEEP:
      Serial.println("* Actual value: SLEEP (0) (red LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, LOW);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      myservo.write(10);     // servo to 10 degrees
      digitalWrite(3, LOW);  // DC motor off
      digitalWrite(4, LOW);  // fan off
      break;
    case GESTURE_UPDOWN:
      Serial.println("* Actual value: UPDOWN (1) (green LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, LOW);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      myservo.write(50);     // servo to 50 degrees
      digitalWrite(3, HIGH); // start DC motor
      digitalWrite(4, LOW);  // fan off
      break;
    case GESTURE_WAVE:
      Serial.println("* Actual value: WAVE (2) (blue LED on)");
      Serial.println(" ");
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, LOW);
      digitalWrite(LED_BUILTIN, LOW);
      myservo.write(180);    // servo to 180 degrees
      digitalWrite(3, LOW);  // DC motor off
      digitalWrite(4, HIGH); // run fan
      break;
    default: // unknown gesture or none: everything off
      digitalWrite(LEDR, HIGH);
      digitalWrite(LEDG, HIGH);
      digitalWrite(LEDB, HIGH);
      digitalWrite(LED_BUILTIN, LOW);
      myservo.write(10);     // servo back to 10 degrees
      digitalWrite(3, LOW);  // DC motor off
      digitalWrite(4, LOW);  // fan off
      break;
  }
}
</syntaxhighlight>
[[Category:Motion Tracking]][[Category:Arduino]][[Category:Classification Models]]

----

= Stable Diffusion =
== Generating images from a sentence using Stable Diffusion ==

Stable Diffusion is a text-to-image model developed by [https://github.com/CompVis/stable-diffusion CompVis] and sponsored primarily by [https://stability.ai/ Stability AI], a startup that aims to be a driving force behind open-source AI. The model builds on ideas from [https://openai.com/dall-e-2/ OpenAI's DALL-E 2], [https://imagen.research.google/ Google's Imagen] and other image-generation models, with some extra optimizations.

=== Running Stable Diffusion ===
At the time of writing, Stable Diffusion (SD) is only installed on the PCs in classroom WH02.110. So find yourself a PC and [[Ubuntu | login to Ubuntu]]. After you've logged in, open the [[Terminal | terminal]] and enter the following commands to start up SD.

1: Navigate to the SD folder
 cd ~/MachineLearning/stable-diffusion-webui

2: Activate the environment
 conda activate stable-diffusion

3: Start SD
 python launch.py --opt-split-attention --medvram --disable-safe-unpickle

The moment you run the final command, some output will appear in the terminal. At a certain point you will see <code>Running on local URL: http://127.0.0.1:7860</code>. If you open any browser and navigate to this link (or click the link in the terminal while holding the <code>ctrl</code> key), you will be greeted by the SD interface. Keep in mind that this is a local URL: you can only visit it on the PC that is running the SD command from step 3.

=== Image sizes ===
'''[1:1]''' Square: <code>--W 512 --H 512</code><br>
'''[16:9]''' Widescreen: <code>--W 1024 --H 576</code><br>
'''[9:16]''' Mobile first (Instagram stories, Snapchat etc.): <code>--W 576 --H 1024</code><br>
'''[10:16]''' Portrait: <code>--W 640 --H 1024</code>

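Note that <code>--W</code>/<code>--H</code> are flags of the command-line <code>txt2img.py</code> script (see the Code repository below), not of the web UI, where width and height are set in the interface instead. A hypothetical widescreen invocation (the prompt here is just an example) might look like:

 python scripts/txt2img.py --prompt "a lighthouse in a storm, oil painting" --W 1024 --H 576
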
== Code ==
Github: https://github.com/mywdka/stable-diffusion

== Tools ==
Dataset explorer: https://knn5.laion.ai ([https://rom1504.github.io mirror])<br>
Prompt builder: https://promptomania.com/stable-diffusion-prompt-builder/<br>
Img to prompt: https://replicate.com/methexis-inc/img2prompt<br>
Prompt examples: https://lexica.art/<br>
Have I Been Trained: https://haveibeentrained.com/

== Community ==
Discord: http://discord.gg/stablediffusion

== Readme with examples ==
http://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase

[[Category:Generative Models]]

----

= Quill 2023 =
== Elective - Quill ==

== Tutors ==
Brigit Lichtenegger<br/>
Cesare Davolio<br/>

<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />

== Assignment ==
Create an animated scene with Quill.<br/>
This scene can be anything you feel like: an impression of your street, a scene from a movie, or a dream you once had.<br/>
You will then film this scene in VR to make a short 2D movie.<br/>

Inspiration / examples:<br/>
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/>
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowski Tribute - Stefano Bagnoli]<br/>
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/>

== Planning ==

=== Week 1 : Sketch ===
Tuesday<br/>
Drawing - Basics<br/>
Exercise: draw the controllers with Quill<br/>
Tech focus: freehand drawing<br/>
Inspirational movie of the day: Dear Angelica<br/>

Wednesday<br/>
Drawing - Lab<br/>
Exercise: continue your drawing, and change it as you like<br/>
Tech focus: practice and play with what you have learned so far<br/>
Inspirational movie of the day: Battle Scar<br/>

Thursday<br/>
Animation - Basics + Sound<br/>
Exercise: disrupt your scene with something (a bird, a bouncing ball, weather...)<br/>
Tech focus: bring your drawings to life using motion capture and frame-by-frame animation<br/>
Inspirational movie of the day: Namoo<br/>

Friday<br/>
Animation - More basics and some advanced stuff<br/>
Exercise: have something in your scene react to <br/>
Tech focus: rigging, grouping, puppeteering, keyframing<br/>
Inspirational movie of the day: Gloomy Eyes<br/>

=== Week 2 : Revisit ===

Monday<br/>
Camera and Sound - Basics<br/>

Tuesday<br/>
Repaint<br/>
Use what you have learned so far to improve on what you did last week<br/>

Wednesday<br/>
Assignment time

Thursday<br/>
Assignment time

Friday<br/>
Presentations with Brigit & Ces

== References ==
[https://quill.art/ Quill by Smoothstep]

Watch content on Oculus TV and VR Animation Player!<br/>

There are a lot of tutorials on YouTube:<br/>

[https://www.youtube.com/watch?v=HWHiSCt1oBk&ab_channel=GoroFujita A very good one to get the basics]<br/>
[https://www.youtube.com/watch?v=dXPv5P_pJeI&t=3486s&ab_channel=GoroFujita Tutorial on cameras]<br/>

And in general:<br/>

[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/>
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/>
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/>
[https://discord.gg/cghmzwT Quill Discord Channel]<br/>

Also check out these to get things working:<br/>
[https://interactionstation.wdka.hro.nl/wiki/About_Oculus_Quest#Oculus_Quest_and_Link_Cable Oculus Quest and Link Cable]<br/>
[https://interactionstation.wdka.hro.nl/wiki/About_Oculus_Quest#Fix_to_Enable_Oculus_Software_on_Student_Accounts Fix to Enable Oculus Software on Student Accounts]<br/>

[[Category:Virtual Reality]]
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/watch?v=HWHiSCt1oBk&ab_channel=GoroFujita A very good one to get the basics] <br/><br />
[https://www.youtube.com/watch?v=dXPv5P_pJeI&t=3486s&ab_channel=GoroFujita Tutorial on cameras] </br><br />
</br><br />
and in general </br><br />
</br><br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
[https://discord.gg/cghmzwT Quill Discord Channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9674Quill 20232023-01-09T12:29:46Z<p>Evolutie: /* References */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/watch?v=HWHiSCt1oBk&ab_channel=GoroFujita A very good one to get the basics] <br/><br />
[https://www.youtube.com/watch?v=dXPv5P_pJeI&t=3486s&ab_channel=GoroFujita Tutorial on cameras] </br><br />
</br><br />
and in general </br><br />
</br><br />
<br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
[https://discord.gg/cghmzwT Quill Discord Channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9673Quill 20232023-01-09T12:28:46Z<p>Evolutie: /* References */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/watch?v=HWHiSCt1oBk&ab_channel=GoroFujita A very good one to get the basics] <br/><br />
[https://www.youtube.com/watch?v=dXPv5P_pJeI&t=3486s&ab_channel=GoroFujita Tutorial on cameras] </br><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
[https://discord.gg/cghmzwT Quill Discord Channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9672Quill 20232023-01-09T12:21:58Z<p>Evolutie: /* References */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
[https://discord.gg/cghmzwT Quill Discord Channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9671Quill 20232023-01-09T12:19:24Z<p>Evolutie: /* References */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
[https://discord.gg/cghmzwT Quill Discord Channel]<br/]<br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9670Quill 20232023-01-09T12:06:32Z<p>Evolutie: /* Week 2 : Revisit */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
<br />
Monday<br><br />
Camera and Sound - Basics <br/><br />
<br />
Tuesday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9669Quill 20232023-01-09T12:04:45Z<p>Evolutie: /* Week 1 : Sketch */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing <br/><br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9668Quill 20232023-01-09T12:04:03Z<p>Evolutie: /* Week 1 : Sketch */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality, as well as 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students illustration and animation interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowsky Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Tuesday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the controllers with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and played with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion mocap and frame by frame animation<br/><br />
inspirational movie of the day: Namoo<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advances stuff<br/><br />
exercise: have something in your scene react to <br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2023&diff=9667Quill 20232023-01-09T10:51:16Z<p>Evolutie: Created page with "== Elective - Quill == == Tutors == Brigit Lichtenegger<br/> Cesare Davolio<br/> == Description == The recent developments in the field of Virtual Reality have not only brou..."</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowski Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Lessons_%26_courses&diff=9666Lessons & courses2023-01-09T10:50:39Z<p>Evolutie: </p>
<hr />
<div>*[[Algorithmic Drawing]]<br />
*[[bibliotecha]]<br />
*[[Chihuahua or Muffin?]]<br />
*[[Chihuahua or Muffin? (2019)]]<br />
*[[Chihuahua or Muffin? (2018)]]<br />
*[[Critical Making Session]]<br />
*[[Critical Tools: The Fabulous School of Octopy]]<br />
*[[Debugging electronics]]<br />
*[[Digital Craft classes]]<br />
*[[Dirty Data and Dog Nuggets]]<br />
*[[Energy for Designers]]<br />
*[[Food Station 2.0]]<br />
*[[Hardware Hacking]]<br />
*[[Intro Interactive Fashion]]<br />
*[[Kinect_Hacks_2015-2016]] <br />
*[[Late Night Soldering: Wind]]<br />
*[[Late Night Soldering : Water]]<br />
*[[Paper, Strings & Electronic things]]<br />
*[[Play FA 1.1]]<br />
*[[Points of departure: Wearables]]<br />
*[[Programming with Processing]]<br />
*[[Programming with Processing(2019)]]<br />
*[[Prototyping Future Realities w/ Tilt Brush]]<br />
*[[Quill 2023]]<br />
*[[Quill 2022]]<br />
*[[Repair and Broken World Thinking]]<br />
*[[Product Design Intro to Arduino]]<br />
*[[RE-]]<br />
*[[Sensing]]<br />
*[[The dying art of computer viruses]]<br />
*[[Trans/humanism/Trans/formationdesign]]<br />
*[[Twitter Course]]<br />
*[[Zoetrope workshop readme]]<br />
*[[Fashion Technology|E-textiles and Wearables]]<br />
*[[MicrobitStationSkill]]<br />
*[[Automata]]<br />
*[[Tinkering workshop]]<br />
*[[physical interfaces]]<br />
*[[manifesto|Manifesto/Movement]]<br />
*[[how it's made]]<br />
*[[DBK4]]<br />
*[[MakingCapacitiveSensors]]<br />
*[[Transformation Design:Waste Streams]]<br />
*[[Bleep Slider mini synth]]<br />
*[[De-fine art]]<br />
*[[Diving in Aether]]<br />
*[[Demystifying the Æther]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Lessons_%26_courses&diff=9665Lessons & courses2023-01-09T10:50:01Z<p>Evolutie: </p>
<hr />
<div>*[[Algorithmic Drawing]]<br />
*[[bibliotecha]]<br />
*[[Chihuahua or Muffin?]]<br />
*[[Chihuahua or Muffin? (2019)]]<br />
*[[Chihuahua or Muffin? (2018)]]<br />
*[[Critical Making Session]]<br />
*[[Critical Tools: The Fabulous School of Octopy]]<br />
*[[Debugging electronics]]<br />
*[[Digital Craft classes]]<br />
*[[Dirty Data and Dog Nuggets]]<br />
*[[Energy for Designers]]<br />
*[[Food Station 2.0]]<br />
*[[Hardware Hacking]]<br />
*[[Intro Interactive Fashion]]<br />
*[[Kinect_Hacks_2015-2016]] <br />
*[[Late Night Soldering: Wind]]<br />
*[[Late Night Soldering : Water]]<br />
*[[Paper, Strings & Electronic things]]<br />
*[[Play FA 1.1]]<br />
*[[Points of departure: Wearables]]<br />
*[[Programming with Processing]]<br />
*[[Programming with Processing(2019)]]<br />
*[[Prototyping Future Realities w/ Tilt Brush]]<br />
*[[Quill 2022]]<br />
*[[Repair and Broken World Thinking]]<br />
*[[Product Design Intro to Arduino]]<br />
*[[RE-]]<br />
*[[Sensing]]<br />
*[[The dying art of computer viruses]]<br />
*[[Trans/humanism/Trans/formationdesign]]<br />
*[[Twitter Course]]<br />
*[[Zoetrope workshop readme]]<br />
*[[Fashion Technology|E-textiles and Wearables]]<br />
*[[MicrobitStationSkill]]<br />
*[[Automata]]<br />
*[[Tinkering workshop]]<br />
*[[physical interfaces]]<br />
*[[manifesto|Manifesto/Movement]]<br />
*[[how it's made]]<br />
*[[DBK4]]<br />
*[[MakingCapacitiveSensors]]<br />
*[[Transformation Design:Waste Streams]]<br />
*[[Bleep Slider mini synth]]<br />
*[[De-fine art]]<br />
*[[Diving in Aether]]<br />
*[[Demystifying the Æther]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=9664Quill 20222023-01-09T10:48:18Z<p>Evolutie: Evolutie moved page Quill to Quill 2022 without leaving a redirect</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowski Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Making_a_video_installation_with_Raspberry_Pi&diff=8229Making a video installation with Raspberry Pi2022-03-31T07:43:34Z<p>Evolutie: </p>
<hr />
<div>=Prepare the SD card=<br />
<br />
''This tool has been tested on the following Raspberry Pi models:''<br />
*Raspberry Pi 1 <br />
*Raspberry Pi 2 <br />
*Raspberry Pi 3 <br />
*Raspberry Pi 4 <br />
*Raspberry Pi Zero W <br />
<br />
*Syncing multiple video channels via Ethernet is possible with the models with Ethernet (Raspberry Pi 1, 2, 3 & 4)<br />
*Syncing multiple video channels via WiFi is possible with the models with integrated WiFi (Raspberry Pi 3, 4 and Zero W)<br />
<br />
= Download and burn the image =<br />
<br />
Ask for the image at the Interaction Station's office. You will need a 32GB SD card: <br />
File name: video_multi_channel_sync_pi_4_2020.img<br />
<br />
Download Etcher<br />
https://www.balena.io/etcher/<br />
<br />
Use Etcher to flash the SD card with the image<br />
<br />
<br />
= Configure the Raspberry Pi for playing a video on a loop =<br />
<br />
Connect the Pi to a screen, keyboard and mouse:<br />
*Notes on Raspberry Pi 4:<br />
*The Raspberry Pi 4 has two HDMI (micro HDMI) outputs. You will need to use "HDMI0", which is the output on the left, next to the power connector.<br />
<br />
<br />
Insert the SD card that you just flashed into the Pi.<br />
<br />
Plug in the power adapter. <br />
<br />
When you see a video playing, repeatedly press the keys CTRL and C (at the same time) to escape the looping video.<br />
<br />
To get into the graphical interface, type:<br />
startx<br />
and hit the key Enter.<br />
<br />
Insert a USB stick with your video (video codec: H264; the file name shouldn't contain spaces or non-alphanumeric characters)<br />
<br />
Go to "File Manager"<br />
<br />
Search for your video in the folder /media/root/PEN_FAT32<br />
<br />
Copy your video and place it in /home/pi/video-sync-loop/videos<br />
<br />
Go back to the folder video-sync-loop and edit video-sync.config<br />
<br />
Adjust the configuration so that it says:<br />
playback_mode=loop<br />
<br />
Set the name of your video file:<br />
Video_filename=yourvideoname.mp4<br />
<br />
Save the file<br />
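<br />
For reference, after these edits the two changed lines in video-sync.config should look something like this (a minimal sketch: key names exactly as given above, all other lines left at their defaults):<br />
<syntaxhighlight lang=ini style="border:1px dashed black; max-width: 950px;"><br />
# The two lines changed in this section of video-sync.config<br />
playback_mode=loop<br />
Video_filename=yourvideoname.mp4<br />
</syntaxhighlight><br />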
<br />
Restart the Pi by typing:<br />
sudo reboot now<br />
and pressing the key 'Enter'. Your video should automatically start looping after rebooting.<br />
<br />
= Configure multiple Raspberry Pis for playing multiple videos in sync =<br />
<br />
== Notes: ==<br />
*You need as many Raspberry Pis as videos. <br />
*All Raspberry Pis should be the same model.<br />
*The video should be the same in all Pis.<br />
*The Pis will act as either conductor or follower. <br />
*There can only be one conductor. There can be one or more followers.<br />
<br />
== Conductor ==<br />
Change the config file:<br />
cd /home/pi/video-sync-loop/<br />
nano video-sync.config<br />
Adjust the configuration so that it says:<br />
playback_mode=loop<br />
Look for is_conductor and set it to true:<br />
is_conductor=true<br />
<br />
=== Synchronising the Pi units via WiFi ===<br />
<br />
Then type:<br />
sudo nano /etc/network/interfaces.d/wlan0<br />
Look for the line that starts with address, and set it to:<br />
address 172.24.1.1<br />
<br />
=== Synchronising the Pi units via Ethernet ===<br />
<br />
Type:<br />
sudo nano /etc/dhcpcd.conf<br />
Look for the line that starts with static ip_address, and set it to:<br />
static ip_address=192.168.1.1/24<br />
<br />
== Followers ==<br />
Change the config file:<br />
cd /home/pi/video-sync-loop/<br />
nano video-sync.config<br />
Look for is_conductor and set it to false:<br />
is_conductor=false<br />
<br />
=== Synchronising the Pi units via WiFi ===<br />
<br />
Type:<br />
sudo nano /etc/network/interfaces.d/wlan0<br />
Look for the line that starts with address, and set it to:<br />
address 172.24.1.2<br />
(Use this address if there is only one follower. Otherwise, increment the last number for each additional follower: 172.24.1.3, 172.24.1.4, etc.)<br />
<br />
=== Synchronising the Pi units via Ethernet ===<br />
<br />
Type:<br />
sudo nano /etc/dhcpcd.conf <br />
Look for the line that starts with static ip_address, and set it to:<br />
static ip_address=192.168.1.2/24<br />
<br />
(Use this address if there is only one follower. Otherwise, increment the last number for each additional follower: 192.168.1.3/24, 192.168.1.4/24, etc.)<br />
<br />
= Other options in the config file =<br />
<br />
To edit the config file, in the terminal:<br />
cd /home/pi/video-sync-loop/<br />
nano video-sync.config<br />
<br />
== Selecting the Audio output ==<br />
<br />
By default the sound will come out of the Audio jack of the Pi (you need to connect an amplified speaker to it).<br />
In case you'd like to have the audio played via HDMI you need to change the line:<br />
audio_out=headphone_jack<br />
to:<br />
audio_out=hdmi<br />
<br />
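Putting the options from this page together, the video-sync.config of a follower unit that loops its video and plays audio over HDMI would contain lines like these (a sketch listing only the keys discussed on this page; the file may contain further options, which you can leave untouched):<br />
<syntaxhighlight lang=ini style="border:1px dashed black; max-width: 950px;"><br />
playback_mode=loop<br />
Video_filename=yourvideoname.mp4<br />
is_conductor=false<br />
audio_out=hdmi<br />
</syntaxhighlight><br />
<br />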
And save the file. If you use nano as an editor, you can save the changes by pressing the keys CTRL and X at the same time, then Y to confirm saving, and the key Enter to confirm the file name.</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Getting_Started_With_Raspberry_Pi&diff=7411Getting Started With Raspberry Pi2022-01-25T15:38:37Z<p>Evolutie: /* What is "Raspberry Pi" about? */</p>
<hr />
<div>== What is "Raspberry Pi" about? ==<br />
The Raspberry Pi is a small computer running a Linux operating system.<BR><br />
Using sensors, the Raspberry Pi can measure things like temperature and light levels, and it can switch motors and lights on or off.<BR><br />
<BR><br />
If you have no experience using microcontrollers it is better to start with the Arduino. [[Getting Started With Arduino | Getting started with Arduino]]<br><BR><br />
The Raspberry Pi can connect to the internet using a wifi stick.<BR><br />
Sound on the Raspberry Pi is of much better quality than the Arduino can ever produce.<BR><BR><br />
It also offers HDMI output and accelerated video decoding, which makes the Raspberry Pi perfect to use for video installations. <BR><br />
<BR><br />
Programming the R-Pi can be done in C or Python.<BR><br />
Most of the time, whatever you do, you will have to install the required Linux libraries.<BR><br />
<BR><br />
Typically you don't use the R-Pi for wearables or e-textiles; there you work with just the chip of the Arduino (ATmega328) or an even smaller one (ATtiny85).<BR><br />
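As a small taste of what Python on the R-Pi looks like, here is a minimal sketch that blinks an LED, assuming the commonly used RPi.GPIO library is installed and an LED (with a resistor) is wired to GPIO pin 18 (the pin number is just an example):<BR><br />
<syntaxhighlight lang=python style="border:1px dashed black; max-width: 950px;"><br />
import time<br />
import RPi.GPIO as GPIO  # assumes the RPi.GPIO library is installed<br />
<br />
LED_PIN = 18  # example wiring: LED + resistor on GPIO 18 (BCM numbering)<br />
<br />
GPIO.setmode(GPIO.BCM)         # use Broadcom pin numbering<br />
GPIO.setup(LED_PIN, GPIO.OUT)  # configure the pin as an output<br />
<br />
try:<br />
    while True:<br />
        GPIO.output(LED_PIN, GPIO.HIGH)  # LED on<br />
        time.sleep(0.5)<br />
        GPIO.output(LED_PIN, GPIO.LOW)   # LED off<br />
        time.sleep(0.5)<br />
finally:<br />
    GPIO.cleanup()  # release the pin on exit<br />
</syntaxhighlight><br />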
<br />
== Raspberry Pi ==<br />
www.raspberrypi.org/<BR><br />
<br />
== Tutorials ==<br />
https://learn.adafruit.com/category/raspberry-pi<BR><br />
<br />
== Where to buy (example) ==<br />
http://www.conrad.nl/ce/nl/product/874675/Raspberry-Pi-model-B-512-MB?ref=searchDetail<BR><br />
with accessories, adaptor, wifi:<BR><br />
http://www.conrad.nl/ce/nl/product/616705/Raspberry-Pi-model-B-512-MB-Incl-besturingssyteem-Wheezy-Incl-behuizing-Incl-netvoeding-Incl-WiFi-stick?ref=searchDetail<br />
<br />
== Raspberry Pi versions ==<br />
Raspberry Pi model B+ 512 MB (adds more USB ports, more GPIO, a better power supply, four mounting holes, and a micro SD card slot)<br><br />
Raspberry Pi model B 512 MB<BR><br />
Raspberry Pi model A 256 MB (buy the B please!)<BR><br />
<br />
== Raspberry Pis and sensors, LEDs, jumper wires, etc ==<br />
wifi:<BR><br />
http://www.conrad.nl/ce/nl/product/993655/Raspberry-Pi-WiFi-stick-EW-7811Un?ref=list<BR><br />
gertboard:<BR><br />
http://www.conrad.nl/ce/nl/product/1000218/Raspberry-Pi-uitbreidingsprintplaat-Gertboard?ref=list<BR><br />
display:<br><br />
http://www.conrad.nl/ce/nl/product/791898/Raspberry-Pi-display-module-TFT-Display?ref=list<br><br />
Cobbler:<br><br />
http://www.conrad.nl/ce/nl/product/409205/Raspberry-Pi-uitbreidingsprintplaat-Cobbler?ref=list<br><br />
<br />
== Interesting projects ==<br />
Using a camera and a touchscreen:<br><br />
http://www.conrad.nl/ce/nl/product/1000222/Raspberry-Pi-camera-module-Infrarood-Pi-NOIR?ref=list<br><br />
https://learn.adafruit.com/diy-wifi-raspberry-pi-touch-cam<br><br />
turning this into a camera that uploads pictures to a server:<br><br />
http://www.pcpro.co.uk/features/386086/make-a-motion-sensing-camera-with-the-raspberry-pi<br><br />
<br />
[[Category:Making]]<br />
[[Category:Raspberry Pi]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Getting_Started_With_Raspberry_Pi&diff=7409Getting Started With Raspberry Pi2022-01-25T15:38:16Z<p>Evolutie: /* What is "Raspberry Pi" about? */</p>
<hr />
<div>== What is "Raspberry Pi" about? ==<br />
The Raspberry Pi is a small computer running a Linux operating system.<BR><br />
Using sensors, the Raspberry Pi can measure things like temperature and light levels, and it can switch motors and lights on or off.<BR><br />
<BR><br />
If you have no experience using microcontrollers it is better to start with the Arduino. [[Getting Started With Arduino | Getting started with Arduino]]<br><BR><br />
The Raspberry Pi can connect to the internet using a wifi stick.<BR><br />
Sound on the Raspberry Pi is of much better quality than the Arduino can ever produce.<BR><br />
It also offers HDMI output and accelerated video decoding, which makes the Raspberry Pi perfect to use for video installations. <BR><br />
<BR><br />
Programming the R-Pi can be done in C or Python.<BR><br />
Most of the time, whatever you do, you will have to install the required Linux libraries.<BR><br />
<BR><br />
Typically you don't use the R-Pi for wearables or e-textiles; there you work with just the chip of the Arduino (ATmega328) or an even smaller one (ATtiny85).<BR><br />
<br />
== Raspberry Pi ==<br />
www.raspberrypi.org/<BR><br />
<br />
== Tutorials ==<br />
https://learn.adafruit.com/category/raspberry-pi<BR><br />
<br />
== Where to buy (example) ==<br />
http://www.conrad.nl/ce/nl/product/874675/Raspberry-Pi-model-B-512-MB?ref=searchDetail<BR><br />
with accessories, adaptor, wifi:<BR><br />
http://www.conrad.nl/ce/nl/product/616705/Raspberry-Pi-model-B-512-MB-Incl-besturingssyteem-Wheezy-Incl-behuizing-Incl-netvoeding-Incl-WiFi-stick?ref=searchDetail<br />
<br />
== Raspberry Pi versions ==<br />
Raspberry Pi model B+ 512 MB (adds more USB ports, more GPIO, a better power supply, four mounting holes, and a micro SD card slot)<br><br />
Raspberry Pi model B 512 MB<BR><br />
Raspberry Pi model A 256 MB (buy the B please!)<BR><br />
<br />
== Raspberry Pis and sensors, LEDs, jumper wires, etc ==<br />
wifi:<BR><br />
http://www.conrad.nl/ce/nl/product/993655/Raspberry-Pi-WiFi-stick-EW-7811Un?ref=list<BR><br />
gertboard:<BR><br />
http://www.conrad.nl/ce/nl/product/1000218/Raspberry-Pi-uitbreidingsprintplaat-Gertboard?ref=list<BR><br />
display:<br><br />
http://www.conrad.nl/ce/nl/product/791898/Raspberry-Pi-display-module-TFT-Display?ref=list<br><br />
Cobbler:<br><br />
http://www.conrad.nl/ce/nl/product/409205/Raspberry-Pi-uitbreidingsprintplaat-Cobbler?ref=list<br><br />
<br />
== Interesting projects ==<br />
Using a camera and a touchscreen:<br><br />
http://www.conrad.nl/ce/nl/product/1000222/Raspberry-Pi-camera-module-Infrarood-Pi-NOIR?ref=list<br><br />
https://learn.adafruit.com/diy-wifi-raspberry-pi-touch-cam<br><br />
turning this into a camera that uploads pictures to a server:<br><br />
http://www.pcpro.co.uk/features/386086/make-a-motion-sensing-camera-with-the-raspberry-pi<br><br />
<br />
[[Category:Making]]<br />
[[Category:Raspberry Pi]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7217Quill 20222022-01-09T18:55:04Z<p>Evolutie: /* Assignment */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowski Tribute - Stefano Bagnoli]<br/><br />
[https://creator.oculus.com/community/100886904754662/ Scenes by Lea Peirano]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7216Quill 20222022-01-09T18:49:23Z<p>Evolutie: /* Assignment */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br/><br />
<br />
inspiration / examples:<br/><br />
[https://www.youtube.com/watch?v=e6-bMz0I0Ag&ab_channel=GoroFujita A moment in time - Goro Fujita]<br/><br />
[https://www.oculus.com/experiences/media/222124262224487/2811542782290941/ The Big Lebowski Tribute - Stefano Bagnoli]<br/><br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7215Quill 20222022-01-09T18:47:13Z<p>Evolutie: /* References */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube <br/><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br/><br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br/><br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]<br/></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7214Quill 20222022-01-09T18:46:28Z<p>Evolutie: /* Week 1 : Sketch */</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/><br />
<br />
Wednesday<br/><br />
Drawing - Lab<br/><br />
exercise: continue your drawing, and change as you like<br/><br />
Tech Focus: practice and play with what you learned so far<br/><br />
inspirational movie of the day: Battle Scar<br/><br />
<br />
Thursday<br/><br />
Animation - Basics + Sound<br/><br />
exercise: disrupt your scene with something (bird, bouncing ball, weather..)<br/><br />
Tech Focus: bring life to your drawings using motion capture and frame-by-frame animation<br/><br />
inspirational movie of the day: Baba Yaga<br/><br />
<br />
Friday<br/><br />
Animation - More Basics and some advanced stuff<br/><br />
exercise: have something in your scene react to the disruption<br/><br />
Tech Focus: rigging, grouping, puppeteering, keyframing<br />
inspirational movie of the day: Gloomy Eyes<br/><br />
<br />
=== Week 2 : Revisit=== <br />
Monday <br/><br />
Repaint<br/><br />
Use what you have learned so far to improve on what you did last week<br/><br />
<br />
Tuesday<br><br />
Camera - Basics <br/><br />
<br />
Wednesday<br><br />
Assignment time<br />
<br />
Thursday<br><br />
Assignment time<br />
<br />
Friday<br><br />
Presentations with Brigit & Ces<br />
<br />
== References ==<br />
[https://quill.art/ Quill by Smoothstep]<br />
<br />
Watch content on Oculus TV and VR Animation Player! <br/><br />
<br />
There are a lot of tutorials on Youtube<br /><br />
<br />
[https://www.youtube.com/channel/UCbTQ4StJhp1RDisqUTuJSZg/videos Goro Fujita]<br />
[https://www.youtube.com/user/Suntr0/videos Nick Ladd]<br />
[https://www.youtube.com/c/VirtualAnimation/videos A lot of classes on the Virtual Animation channel]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7213Quill 20222022-01-09T18:32:26Z<p>Evolutie: </p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.<br />
<br />
== Assignment ==<br />
Create an animated Scene with Quill.<br/><br />
This scene can be anything you feel like. An impression of your street, a scene from a movie, or a dream you once had.<br/><br />
You will then film this scene in VR to make a short 2D movie.<br />
<br />
== Planning ==<br />
<br />
=== Week 1 : Sketch ===<br />
Monday <br/><br />
Drawing - Basics<br/><br />
exercise: draw the object you chose with Quill<br/><br />
Tech Focus: freehand drawing.<br/><br />
inspirational movie of the day: Dear Angelica<br/><br />
<br />
Tuesday <br/><br />
Drawing - Precision<br/><br />
exercise: draw a spatial scene around your object of choice. Use the image reference you brought to study color, light, <br />
strokes and such.<br/><br />
Tech Focus: precision<br/><br />
inspirational movie of the day: Namoo - https://www.baobabstudios.com/namoo<br/></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Quill_2022&diff=7212Quill 20222022-01-09T18:27:19Z<p>Evolutie: Created page with "== Elective - Quill == == Tutors == Brigit Lichtenegger<br/> Cesare Davolio<br/> == Description == The recent developments in the field of Virtual Reality have not only brou..."</p>
<hr />
<div>== Elective - Quill ==<br />
<br />
== Tutors ==<br />
Brigit Lichtenegger<br/><br />
Cesare Davolio<br/><br />
<br />
== Description ==<br />
The recent developments in the field of Virtual Reality have not only brought us fantastic Virtual Experiences, but also new tools for creating art. One of these tools is Quill, a tool developed by Oculus Story Studio for illustrators to create immersive 3D animations directly within VR. Quill was developed while creating the VR piece “Dear Angelica”, which considered a key question: What do illustrations look like in VR? To answer that, the studio moved beyond traditional flat CG tools (Maya, Photoshop, etc.) and developed Quill, which allows hand movements to become strokes of color in true 3D. For the first time, creators of an animated film were inside their own creation, no longer detached from it and looking at it from the outside.<br/> <br />
<br />
In this elective students will explore Quill as a tool for Storytelling, both for Virtual Reality and for 2D illustration and animation. There will be technical instructions to guide students in getting to know Quill, as well as feedback on the artistic creations students will make. This elective will be especially valuable for students of illustration and animation who are interested in the potential of Virtual Reality.</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Lessons_%26_courses&diff=7211Lessons & courses2022-01-09T18:21:36Z<p>Evolutie: </p>
<hr />
<div>*[[Algorithmic Drawing]]<br />
*[[bibliotecha]]<br />
*[[Chihuahua or Muffin?]]<br />
*[[Chihuahua or Muffin? (2019)]]<br />
*[[Chihuahua or Muffin? (2018)]]<br />
*[[Critical Making Session]]<br />
*[[Critical Tools: The Fabulous School of Octopy]]<br />
*[[Debugging electronics]]<br />
*[[Digital Craft classes]]<br />
*[[Dirty Data and Dog Nuggets]]<br />
*[[Energy for Designers]]<br />
*[[Food Station 2.0]]<br />
*[[Hardware Hacking]]<br />
*[[Intro Interactive Fashion]]<br />
*[[Kinect_Hacks_2015-2016]] <br />
*[[Late Night Soldering: Wind]]<br />
*[[Late Night Soldering : Water]]<br />
*[[Paper, Strings & Electronic things]]<br />
*[[Play FA 1.1]]<br />
*[[Points of departure: Wearables]]<br />
*[[Programming with Processing]]<br />
*[[Programming with Processing(2019)]]<br />
*[[Prototyping Future Realities w/ Tilt Brush]]<br />
*[[Quill]]<br />
*[[Repair and Broken World Thinking]]<br />
*[[Product Design Intro to Arduino]]<br />
*[[RE-]]<br />
*[[Sensing]]<br />
*[[The dying art of computer viruses]]<br />
*[[Trans/humanism/Trans/formationdesign]]<br />
*[[Twitter Course]]<br />
*[[Zoetrope workshop readme]]<br />
*[[Fashion Technology|E-textiles and Wearables]]<br />
*[[MicrobitStationSkill]]<br />
*[[Automata]]<br />
*[[Tinkering workshop]]<br />
*[[physical interfaces]]<br />
*[[manifesto|Manifesto/Movement]]<br />
*[[how it's made]]<br />
*[[DBK4]]<br />
*[[MakingCapacitiveSensors]]<br />
*[[Transformation Design:Waste Streams]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6683VQGAN+CLIP2021-09-15T20:35:05Z<p>Evolutie: /* Init Image */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the PCs in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things, but first you must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious, but let's have a look at the ones that are most interesting to us.<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
"prompts": ["Wild roses near a river"],<br><br><br />
[[File:1 wild roses near the river.png]]<br><br><br />
You can enter multiple text prompts like this:<br><br />
"prompts": ["Wild roses near a river", "Giger"],<br><br><br />
[[File:2 wild roses near the river giger.png]]<br><br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
"prompts": ["Wild roses near a river:80", "Giger:20"],<br><br><br />
[[File:3 wild roses near the river-80 giger-20.png]]<br><br><br />
This will result in less Giger, and more Wild roses near a river<br><br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
"image_prompts": ["./hanhoogerbrugge.jpg"],<br><br />
Copy the images that you use into the VQGAN-CLIP-Docker folder<br><br><br />
<br />
[[File:A drawing in the style of Giger imgprompt 250 it.png]]<br><br><br />
This was generated after 250 iterations using prompt: "A drawing in the style of Giger" and the following image prompt: <br><br><br />
[[File:Hanhoogerbrugge.jpg]]<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations lead to more detail, but also take longer to process. 250 is usually a nice number to see if it is going in a direction you like.<br><br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
"max_iterations": 250,<br><br><br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
"save_freq": 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
"size": [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br><br />
"init_image": "./hanhoogerbrugge.jpg",<br><br />
Copy the images that you use into the VQGAN-CLIP-Docker folder<br><br />
An init image with few "max_iterations" will result in a sort of style transfer<br><br><br />
<br />
[[File:A drawing in the style of Giger init img 80 it.png]]<br><br><br />
This was generated after 80 iterations with the prompt "A drawing in the style of Giger", and the image below as init_image<br><br><br />
[[File:Hanhoogerbrugge.jpg]]<br />
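<br />
Putting the options above together, the edited local.json could look something like this (a sketch showing only the keys discussed on this page; merge these values into the existing file rather than replacing it wholesale, since the original config linked at the bottom of this page contains more options):<br />
<syntaxhighlight lang=json style="border:1px dashed black; max-width: 950px;"><br />
{<br />
    "prompts": ["Wild roses near a river:80", "Giger:20"],<br />
    "image_prompts": [],<br />
    "max_iterations": 250,<br />
    "save_freq": 50,<br />
    "size": [576, 320],<br />
    "init_image": "./hanhoogerbrugge.jpg"<br />
}<br />
</syntaxhighlight><br />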
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, output comes scrolling by as the program moves along until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in the "prompts" will be used to name the file!<br />
In the folder "steps" you can find the image per iteration. In case you did a lot of steps, plz delete these!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge your image to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and select your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6682VQGAN+CLIP2021-09-15T20:29:57Z<p>Evolutie: /* Max Iterations */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the PCs in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things, but first you must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious, but let's have a look at the ones that are most interesting to us.<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
"prompts": ["Wild roses near a river"],<br><br><br />
[[File:1 wild roses near the river.png]]<br><br><br />
You can enter multiple text prompts like this:<br><br />
"prompts": ["Wild roses near a river", "Giger"],<br><br><br />
[[File:2 wild roses near the river giger.png]]<br><br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
"prompts": ["Wild roses near a river:80", "Giger:20"],<br><br><br />
[[File:3 wild roses near the river-80 giger-20.png]]<br><br><br />
This will result in less Giger, and more Wild roses near a river<br><br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
"image_prompts": ["./hanhoogerbrugge.jpg"],<br><br />
Copy the images that you use into the VQGAN-CLIP-Docker folder<br><br><br />
<br />
[[File:A drawing in the style of Giger imgprompt 250 it.png]]<br><br><br />
This was generated after 250 iterations using prompt: "A drawing in the style of Giger" and the following image prompt: <br><br><br />
[[File:Hanhoogerbrugge.jpg]]<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations lead to more detail, but also take longer to process. 250 is usually a nice number to check whether the image is going in a direction you like.<br><br />
A low number can be nice if you also specify an init_image; the result will then function like a style transfer.<br />
"max_iterations": 250,<br><br><br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want your generated image to be updated every 50 steps, put:<br />
"save_freq": 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers, the script will crash due to out-of-memory errors.<br />
"size": [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control over where things will be placed.<br />
"init_image": "./flower.jpg",<br />
Copy the images that you use into the VQGAN-CLIP-Docker folder.<br />
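<br />
As a reference, here is a minimal sketch of how the settings discussed above might look together inside "local.json" (the prompt texts and file names are only examples, and the original config file contains further keys that are not shown here):<br />
<syntaxhighlight lang=json style="border:1px dashed black; max-width: 950px;"><br />
    "prompts": ["Wild roses near a river:80", "Giger:20"],<br />
    "image_prompts": ["./hanhoogerbrugge.jpg"],<br />
    "init_image": "./flower.jpg",<br />
    "max_iterations": 250,<br />
    "save_freq": 50,<br />
    "size": [576, 320],<br />
</syntaxhighlight><br />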
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit Ctrl+S on the keyboard or click "Save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:A_drawing_in_the_style_of_Giger_init_img_80_it.png&diff=6681File:A drawing in the style of Giger init img 80 it.png2021-09-15T20:29:05Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6680VQGAN+CLIP2021-09-15T20:27:44Z<p>Evolutie: /* Image Prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
“prompts”: [“Wild roses near a river”],<br><br><br />
[[File:1 wild roses near the river.png]]<br><br><br />
You can enter multiple text prompts like this:<br><br />
“prompts”: [“Wild roses near a river”, “Giger”],<br><br><br />
[[File:2 wild roses near the river giger.png]]<br><br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br><br><br />
[[File:3 wild roses near the river-80 giger-20.png]]<br><br><br />
This will result in less Giger, and more Wild roses near a river<br><br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
“image_prompts”:[“./hanhoogerbrugge.jpg”],<br><br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br><br><br />
<br />
[[File:A drawing in the style of Giger imgprompt 250 it.png]]<br><br><br />
This was generated after 250 iterations using prompt: "A drawing in the style of Giger" and the following image prompt: <br><br><br />
[[File:Hanhoogerbrugge.jpg]]<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br><br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:Hanhoogerbrugge.jpg&diff=6679File:Hanhoogerbrugge.jpg2021-09-15T20:27:17Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:A_drawing_in_the_style_of_Giger_imgprompt_250_it.png&diff=6678File:A drawing in the style of Giger imgprompt 250 it.png2021-09-15T20:22:50Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6677VQGAN+CLIP2021-09-15T20:20:22Z<p>Evolutie: /* prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
“prompts”: [“Wild roses near a river”],<br><br><br />
[[File:1 wild roses near the river.png]]<br><br><br />
You can enter multiple text prompts like this:<br><br />
“prompts”: [“Wild roses near a river”, “Giger”],<br><br><br />
[[File:2 wild roses near the river giger.png]]<br><br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br><br><br />
[[File:3 wild roses near the river-80 giger-20.png]]<br><br><br />
This will result in less Giger, and more Wild roses near a river<br><br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br><br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br><br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:3_wild_roses_near_the_river-80_giger-20.png&diff=6676File:3 wild roses near the river-80 giger-20.png2021-09-15T20:19:25Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:2_wild_roses_near_the_river_giger.png&diff=6675File:2 wild roses near the river giger.png2021-09-15T20:17:39Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:1_wild_roses_near_the_river.png&diff=6674File:1 wild roses near the river.png2021-09-15T20:14:22Z<p>Evolutie: </p>
<hr />
<div></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6673VQGAN+CLIP2021-09-15T13:04:14Z<p>Evolutie: /* Max Iterations */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
“prompts”: [“Wild roses near a river”],<br><br />
You can enter multiple text prompts like this:<br><br />
“prompts”: [“Wild roses near a river”, “Giger”],<br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br><br />
This will result in less Giger, and more Wild roses near a river<br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br><br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br><br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6672VQGAN+CLIP2021-09-15T13:03:56Z<p>Evolutie: /* Image Prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
“prompts”: [“Wild roses near a river”],<br><br />
You can enter multiple text prompts like this:<br><br />
“prompts”: [“Wild roses near a river”, “Giger”],<br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br><br />
This will result in less Giger, and more Wild roses near a river<br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br><br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br><br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6671VQGAN+CLIP2021-09-15T13:03:42Z<p>Evolutie: /* prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br><br />
For example:<br><br />
“prompts”: [“Wild roses near a river”],<br><br />
You can enter multiple text prompts like this:<br><br />
“prompts”: [“Wild roses near a river”, “Giger”],<br><br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br><br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br><br />
This will result in less Giger, and more Wild roses near a river<br><br />
Choosing keywords wisely for your prompts can make a huge difference! <br><br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6670VQGAN+CLIP2021-09-15T13:01:47Z<p>Evolutie: </p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
“prompts”: [“Wild roses near a river”],<br />
You can enter multiple text prompts like this:<br />
“prompts”: [“Wild roses near a river”, “Giger”],<br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br />
This will result in less Giger, and more Wild roses near a river<br />
Choosing keywords wisely for your prompts can make a huge difference! <br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.<br />
<br />
== Original Config file ==<br />
You can find the original config file at https://github.com/kcosta42/VQGAN-CLIP-Docker/blob/main/configs/local.json</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6669VQGAN+CLIP2021-09-15T12:48:41Z<p>Evolutie: /* Init Image */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
“prompts”: [“Wild roses near a river”],<br />
You can enter multiple text prompts like this:<br />
“prompts”: [“Wild roses near a river”, “Giger”],<br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br />
This will result in less Giger, and more Wild roses near a river<br />
Choosing keywords wisely for your prompts can make a huge difference! <br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “./flower.jpg”,<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6668VQGAN+CLIP2021-09-15T12:48:10Z<p>Evolutie: /* Image Prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
“prompts”: [“Wild roses near a river”],<br />
You can enter multiple text prompts like this:<br />
“prompts”: [“Wild roses near a river”, “Giger”],<br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br />
This will result in less Giger, and more Wild roses near a river<br />
Choosing keywords wisely for your prompts can make a huge difference! <br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
“image_prompts”:[“./alien.jpg”, “./wild_roses.jpg”],<br />
Copy the images that you use in the VQGAN-CLIP-Docker folder<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “flower.jpg”,<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6667VQGAN+CLIP2021-09-15T12:40:41Z<p>Evolutie: /* Step 5: Check the results */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
“prompts”: [“Wild roses near a river”],<br />
You can enter multiple text prompts like this:<br />
“prompts”: [“Wild roses near a river”, “Giger”],<br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br />
This will result in less Giger, and more Wild roses near a river<br />
Choosing keywords wisely for your prompts can make a huge difference! <br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
“image_prompts”:[“alien.jpg”, “wild_roses.jpg”],<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations will lead to more detail, and will take longer to process. 250 is usually a nice number to see if it is going in a direction you like. The more iterations the more detail, but this will also take longer to process.<br />
A low number can be nice if you also specify an init_image, and will function like style transfer.<br />
“max_iterations”: 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image will be updated.<br />
So if you want to update your generated image after 50 steps put:<br />
“save_freq”: 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the scripts will crash due to out of memory errors.<br />
“size”: [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This will give you some control of where things will be placed.<br />
“init_image”: “flower.jpg”,<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit ctrl+s on the keyboard or click "save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, progress output comes scrolling by as the program runs until it's done.<br />
<br />
== Step 5: Check the results ==<br />
The final image will be saved in the folder named "outputs" in VQGAN-CLIP-Docker. Whatever you put in "prompts" will be used to name the file!<br />
In the folder "steps" you can find the intermediate images saved during generation. If you generated a lot of steps, please delete these when you are done!<br />
<br />
== Bonus Step: Upscaling the Image ==<br />
The website https://bigjpg.com/ allows you to upload your image and use machine learning to enlarge it to 4x the original size for free.<br />
<br />
== Note to make life easier ==<br />
If you are playing around and running the script again and again, you don't have to type the command every time. By hitting the Up Arrow on the keyboard you can scroll through the terminal history and recall your earlier command.</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6666VQGAN+CLIP2021-09-15T12:30:28Z<p>Evolutie: /* prompts */</p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we installed everything you need on the pc's in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select user "InteractionStation", and type the password "toegangstud"<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things but first we must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"<br />
Some of the things you can change here are a bit mysterious but let's have a look at some that are interesting to us for sure<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
“prompts”: [“Wild roses near a river”],<br />
You can enter multiple text prompts like this:<br />
“prompts”: [“Wild roses near a river”, “Giger”],<br />
It is also possible to give weights to each text. All weights should add up to 100. e.g.:<br />
“prompts”: [“Wild roses near a river:80”, “Giger:20”],<br />
This will result in less Giger, and more Wild roses near a river<br />
Choosing keywords wisely for your prompts can make a huge difference! <br />
You can see this clearly from the grid posted here https://imgur.com/a/SnSIQRu<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
"image_prompts": ["alien.jpg", "wild_roses.jpg"],<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations lead to more detail, but also take longer to process. 250 is usually a nice number to see if the image is going in a direction you like.<br />
A low number can be nice if you also specify an init_image, and will then function like a style transfer.<br />
"max_iterations": 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image is updated.<br />
So if you want your generated image to be updated every 50 steps, put:<br />
"save_freq": 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the script will crash with out-of-memory errors.<br />
"size": [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This gives you some control over where things will be placed.<br />
"init_image": "flower.jpg",<br />
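<br />
Putting it all together, the settings discussed above could look roughly like this in local.json. This is only a sketch: the real local.json contains more settings than shown here, so only change the fields you recognise and leave the rest untouched, and treat the prompt texts and file names below as placeholders.<br />
<syntaxhighlight lang=json style="border:1px dashed black; max-width: 950px;"><br />
{<br />
  "prompts": ["Wild roses near a river:80", "Giger:20"],<br />
  "image_prompts": ["wild_roses.jpg"],<br />
  "max_iterations": 250,<br />
  "save_freq": 50,<br />
  "size": [576, 320],<br />
  "init_image": "flower.jpg"<br />
}<br />
</syntaxhighlight><br />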
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit Ctrl+S on the keyboard or click "Save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, output comes scrolling by as the program works through the iterations until it's done.<br />
<br />
== Step 5: Check the results ==</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6665VQGAN+CLIP2021-09-15T12:28:09Z<p>Evolutie: </p>
<hr />
<div>== Generate Images from a text prompt ==<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we have installed everything you need on the PCs in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select the user "InteractionStation" and type the password "toegangstud".<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things, but first you must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json".<br />
Some of the settings you can change here are a bit mysterious, but let's look at the ones that are most interesting to us.<br />
<br />
=== prompts ===<br />
Here you can type what image to generate.<br />
For example:<br />
"prompts": ["Wild roses near a river"],<br />
You can enter multiple text prompts like this:<br />
"prompts": ["Wild roses near a river", "Giger"],<br />
It is also possible to give a weight to each text. All weights should add up to 100, e.g.:<br />
"prompts": ["Wild roses near a river:80", "Giger:20"],<br />
This will result in less Giger and more wild roses near a river.<br />
<br />
=== Image Prompts ===<br />
You can also give the algorithm one or more image prompts. The computer will try to make images that are similar to the image prompts specified.<br />
"image_prompts": ["alien.jpg", "wild_roses.jpg"],<br />
<br />
=== Max Iterations ===<br />
Here you specify how many steps to take. More iterations lead to more detail, but also take longer to process. 250 is usually a nice number to see if the image is going in a direction you like.<br />
A low number can be nice if you also specify an init_image, and will then function like a style transfer.<br />
"max_iterations": 250,<br />
<br />
=== Save frequency ===<br />
This determines after how many steps the output image is updated.<br />
So if you want your generated image to be updated every 50 steps, put:<br />
"save_freq": 50,<br />
<br />
=== Size ===<br />
Here you specify the resolution of the image you are generating.<br />
Currently on our computers this can be a maximum of 576 x 320. If you specify higher numbers the script will crash with out-of-memory errors.<br />
"size": [576, 320],<br />
<br />
=== Init Image ===<br />
You can specify an image to start from. This gives you some control over where things will be placed.<br />
"init_image": "flower.jpg",<br />
<br />
== Step 4: Save the configuration and start generating ==<br />
Hit Ctrl+S on the keyboard or click "Save" in the top right of the window.<br />
Go to the terminal window and type the following command:<br />
python3 -m scripts.generate -c ./configs/local.json<br />
If all is well, output comes scrolling by as the program works through the iterations until it's done.<br />
<br />
== Step 5: Check the results ==</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=VQGAN%2BCLIP&diff=6664VQGAN+CLIP2021-09-15T11:52:08Z<p>Evolutie: Created page with "Create Images from a text prompt In this tutorial we will create images by typing text. If you want to do this from your home you can use this colab: https://github.com/just..."</p>
<hr />
<div>Create Images from a text prompt<br />
<br />
In this tutorial we will create images by typing text.<br />
<br />
If you want to do this from your home you can use this colab: https://github.com/justin-bennington/somewhere-ml/blob/main/S2_GAN_Art_Generator_(VQGAN%2C_CLIP%2C_Guided_Diffusion).ipynb<br />
How to use it is described here: https://docs.google.com/document/d/1Lu7XPRKlNhBQjcKr8k8qRzUzbBW7kzxb5Vu72GMRn2E/edit<br />
<br />
However, it is much faster to work on the computers at the Interaction Station, where we have installed everything you need on the PCs in WH.02.110.<br />
<br />
<br />
== Step 1: Boot the PC to Ubuntu ==<br />
We need to start the computer in the Linux Ubuntu Operating System, so when you start the computer keep an eye on the screen. When it lists some options, select "Ubuntu".<br />
If the computer starts Windows, you missed it. Just restart the computer and try again.<br />
When it starts Ubuntu and asks you for a login, select the user "InteractionStation" and type the password "toegangstud".<br />
<br />
== Step 2: Start the conda environment in a terminal ==<br />
<br />
Click the "Show appplications" icon in the bottom left of the screen and type "terminal" in the search box. Select the terminal icon that pops up. This will open a black window that will allow you to type commands.<br />
type the following:<br />
cd Projects/VQGAN_CLIP-Docker<br />
<br />
and then<br />
conda activate vqgan-clip<br />
<br />
You are now ready to run things, but first you must modify the configuration file to your wishes.<br />
<br />
== Step 3: Modify the configuration file ==<br />
Click on the "Files" icon in the top left of the Ubuntu screen and navigate to Projects - VQGAN_CLIP-Docker - configs.<br />
Now open the file called "local.json"</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Tech_Stuff&diff=6663Tech Stuff2021-09-15T11:12:37Z<p>Evolutie: /* Tech Stuff#Machine Learning */</p>
<hr />
<div><br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Arduino<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Arduino_Introduction | Arduino Introduction]]<br />
* [[Arduino_Behind_the_Scenes | Arduino; behind the scenes]]<br />
* [[Baremetal_arduino | Make a ''baremetal'' arduino]]<br />
* [[Getting Started With Arduino | Getting started with Arduino]]<br />
* [https://learn.sparkfun.com/tutorials/connecting-arduino-to-processing How to connect Arduino and Processing]<br />
* [[Driving a motor with the L298N]]<br />
* [[Arduino_basics_workshop | Arduino Basics Workshop notes]]<br />
* [[Arduino basic_workshop | Arduino Basic Workshop extended]]<br />
* [[Arduino and relays|Arduino and Conrad relay modules at the Station]]<br />
* [[Playing sound (with the MP3 shield) & pressure plates]]<br />
* [[Capacitive sensing - trigger sound in MaxMsp ]]<br />
* [[Pressure/bend sensor]]<br />
* [[Thermistor]]<br />
* [[Keyes temperature sensor module]]<br />
* [[Relay]]<br />
* [[Optocoupler]]<br />
*[[ Adafruit Cap 1188 breakout]]<br />
* [[Arduino & Pure Data communications]]<br />
* [[A list of conductive material]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Command Line<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Command Line Useful Commands]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Audio<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Playing sound (with the MP3 shield) & pressure plates]]<br />
* [http://aubio.org/ aubio]; useful tool for things like pitch tracking & beat detection<br />
* [http://beausievers.com/synth/synthbasics/ A good introduction on the basics of audio synthesis]<br />
* [[Using a piezo element as a contact mic | Using a piezo element as a contact mic]]<br />
* [[Hacking an mp3 player]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Augmented Reality<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Augmented Reality tools - Feb 2020 | Augmented Reality tools - Feb 2021]]<br />
* [[Unity and Vuforia Tutorials 2021]]<br />
* [[Using Vuforia and Unity to make an AR app| Using Vuforia and Unity to make an AR app (old)]]<br />
* [[Interesting Augmented Reality links]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Circuit Making<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [http://digitalcraft.wdka.nl/wiki/PracticalTheExpandedToolbox/Lab2 Digital Craft lab on making a simple noise circuit]<br />
* [http://www.falstad.com/circuit/ Javascript circuit simulator]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>DMX<includeonly>]]<br></includeonly></onlyinclude>==<br />
*[[Using the DMX Dimmer Pack]]<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Emotion Recognition<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [http://nordicapis.com/20-emotion-recognition-apis-that-will-leave-you-impressed-and-concerned/ Camera / sound emotion analysis ]<br />
* [http://blog.mashape.com/list-of-20-sentiment-analysis-apis/ Text sentiment analysis APIs]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Internet of things<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [http://www.simblee.com Simblee]<br />
* [[Internet of Things]]<br />
* [[Lora/Gateway]]<br />
* [[Lora/shield]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Kinect<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[About Kinect | About the Kinect]]<br />
* [[Setting up the Kinect with Processing | Getting the Kinect to work with Processing]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Little Bits<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[About Little Bits | About the Little Bits]]<br />
* [[The Arduino Module | The Arduino Module ]]<br />
* [[The Synth Kit | The Synth Kit ]]<br />
* [[Wireless fun | Wireless fun ]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Makey Makey<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[About MakeyMakey | About the Makey Makey]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Machine Learning<includeonly>]]<br></includeonly></onlyinclude>==<br />
<br />
''Convolutional Neural Networks''<br />
*[[YOLO | YOLOv3]]<br />
*[[Deep Dream with Darknet | DeepDream with Darknet]]<br />
<br />
''Generative Models''<br />
*[[DCGAN | DCGAN]]<br />
*[[DeepFakes | DeepFakes]]<br />
*[[Pix2pix | Pix2pix]]<br />
*[[GauGAN | GauGAN]]<br />
*[[VQGAN+CLIP | VQGAN+CLIP]]<br />
<br />
''Reinforcement Learning''<br />
*[[Pong | Pong]]<br />
<br />
''Natural Language Processing''<br />
*[[RNNs with Darknet | RNNs with Darknet]]<br />
*[[RNN | RNN]]<br />
*[[GPT-2 | GPT-2]]<br />
<br />
''Other themes''<br />
*[[ML links | Machine Learning links]]<br />
<br />
''Setup / Installation''<br />
*[[ML computers setup|ML computers setup (2017)]]<br />
*[[Nvidia Drivers and CUDA | Nvidia Drivers and CUDA]]<br />
*[[ML computers python|ML computers python]]<br />
*[[ML computers tensorflow|ML computers tensorflow]]<br />
*[[ML computers jupyter notebooks|ML computers jupyter notebooks]]<br />
*[[ML computers runway|ML computers runway]]<br />
*[[ML computers nvidia-docker|ML computers nvidia-docker]]<br />
*[[Docker| Docker]]<br />
*[[NVIDIA Jetson Nano | NVIDIA Jetson Nano installation]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>OpenEEg<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[OpenEEG research]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Plotters<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [http://www.github.com/mywdka/plotter_examples Processing plotter examples]<br />
* [http://www.chiplotle.org Chiplotle!], a python library for driving plotters<br />
* [http://www.github.com/mywdka/plotterturtle Plotterturtle], a simple python module for converting turtle drawings to HPGL language<br />
* [https://en.wikipedia.org/wiki/HP-GL HPGL], HPGL language commands<br />
* [http://www.prolific.com.tw/US/ShowProduct.aspx?p_id=229&pcid=41 USB serial driver for Mac]<br />
* [[4xidraw]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Processing<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Getting Started with Processing | Getting started with Processing]]<br />
* [https://learn.sparkfun.com/tutorials/connecting-arduino-to-processing How to connect Arduino and Processing]<br />
* [[Setting up the Kinect with Processing | Getting the Kinect to work with Processing]]<br />
* [[Processing to Android app | Making Android apps with Processing]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Python<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Installing pip | Installing pip]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Projection Mapping<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [http://hv-a.com/lpmt/ LPMT] (free)<br />
* [https://hcgilje.wordpress.com/vpt/ VPT7] (free)<br />
* Commercial packages with free demos a.o.: [https://resolume.com/ Resolume], [http://madmapper.com/ Madmapper].<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Web Scraping<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [https://forum.webscraper.io/t/web-scraper-on-firefox/1360 Web scraper addon for firefox]<br />
* [[SaF | Scrape and Fake]]<br />
* [http://social-metrics.org/tutorial-list/ Good tutorials on twitter scraping]<br />
* [http://www.curiositybits.com/new-page-2/ Twitter & Facebook scrape tutorials]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Raspberry Pi<includeonly>]]<br></includeonly></onlyinclude>[[File:Raspi.gif |70px]]==<br />
* [[Getting Started With Raspberry Pi | Getting started with Raspberry Pi]]<br />
* [[Raspberry Pi on Lynda | Raspberry Pi on Lynda]]<br />
* [[Raspberry Pi on Adafruit | Raspberry Pi on Adafruit]]<br />
* [[Raspberry Pi SD Cards and Images]]<br />
* [[Fourteen year old Henry's tutorials |Fourteen year old Henry's tutorials]]<br />
* [[ Raspberry pi GPIO pins | Raspberry pi GPIO pins]]<br />
* [[Beginners YouTube Channel | Beginners YouTube Channel]]<br />
* [[You might need some Python | You might need some Python]]<br />
* [[Maybe some command line magic? |Maybe some command line magic? ]]<br />
* [[Project tutorials | Project tutorials]]<br />
* [[Settings for connecting an rPi to eduroam]]<br />
* [[RPi/datalogger]]<br />
* [[Simple Line Printer interface]]<br />
* [[Making a video installation with Raspberry Pi]]<br />
* [[Raspberry_Pi_Streaming_Media_Server]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Silhouette Cameo Cutter<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[simple guide to use the cutter | Using the cutter by dummies for dummies ]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Video<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[VideoCompression | How to compress a large videofile]] - using quicktime conversion<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Unity 3D<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Unity Tutorials from the Interaction Station]]<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Virtual Reality<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[Using the Vive Trackers in Unity]]<br />
* [[About Oculus Rift | About the Oculus Rift]]<br />
* [[About Oculus Quest]]<br />
* [[About GearVR]]<br />
* [[About HTC Vive]]<br />
* [[Tilt Brush]]<br />
* [[qrVR - Quickly visualize & share 3D models in VR]]<br />
* [[Version compatibility for Oculus Rift DK2 and Unity 5.x]]<br />
* [[GearVR and Unreal 4]]<br />
* [[GearVR and Unity]]<br />
<br />
==<onlyinclude><includeonly>[[Tech Stuff#</includeonly>Web<includeonly>]]<br></includeonly></onlyinclude>==<br />
* [[FTP Upload | How to transfer a file via ftp]]<br />
* [[/MozAddOnSDK | Installing Mozilla Add-On SDK]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Tilt_Brush&diff=6580Tilt Brush2021-04-21T20:53:19Z<p>Evolutie: </p>
<hr />
<div>Tilt Brush lets you paint in 3D space with virtual reality.<br />
<br />
[https://www.tiltbrush.com/ Tilt Brush]<br />
<br />
=== How to import media ===<br />
<br />
==== On a PC ====<br />
*1. Using File Explorer go to the Tilt Brush Media Library ( Documents\Tilt Brush\Media Library ).<br><br />
*2. Put the file you're importing into the correct folder. (e.g. Images folder for images)<br><br />
*3. In Tilt Brush you can find your files via ... more options -> labs -> Local Media Library<br><br />
<br />
[[File:Tips 8-2b.png | 600px]]<br />
<br />
==== On an Oculus Quest ====<br />
*1. Connect the headset to the computer using a USB cable<br><br />
*2. Go to the File Explorer on the PC. On a Mac, use Android File Transfer.<br><br />
*3. Go to the Tilt Brush Media Library ( This PC\Quest\Internal shared storage\Tilt Brush\Media Library ).<br><br />
*4. Put the file you're importing into the correct folder. (e.g. images folder for images).<br><br />
*5. If the Quest does not appear, or Android File Transfer is not able to access it, deactivate Developer Mode:<br />
** With Oculus Connect App on iPhone/Android, login with Interaction Station Account (ask at the Station)<br />
** Select the right Oculus Device<br />
** More Settings -> Developer mode -> disable<br />
*6. Disconnect the Quest from the PC.<br><br />
*7. Start Tilt Brush and find your files via ... more options -> labs -> Local Media Library (see image above)<br><br />
<br />
=== How to export media ===<br />
<br />
==== Exporting images, videos and GIFs ====<br />
*1. Go to the Tools bar, and select "Cameras."<br />
*2. Get what you want to show in the snapshot view, and pull the trigger on the controller.<br />
*3. You can swap to video, 5-second gif, and auto-gif by swiping right or left on the touch pad.<br />
<br />
==== Exporting 3D models ====<br />
*1. Select "More Options…"<br />
*2. Select "Labs."<br />
*3. Select "Export."<br />
<br />
==== Exporting 360-degree video ====<br />
This can be done on a PC. Check page 8 of [https://docs.google.com/document/d/11ZsHozYn9FnWG7y3s3WAyKIACfbfwb4PbaS8cZ_xjvo/edit# this document].<br />
<br />
==== Finding your exports ====<br />
*1. On a PC everything goes to Documents\Tilt Brush. 3D models are in the folder called "Exports." Images and GIFs are in the folder called "Snapshots." Videos are in the folder called "Videos."<br><br />
*2. On an Oculus Quest, connect the Quest to a computer and use File Explorer on a PC, or Android File Transfer on a Mac, and find things in This PC\Quest\Internal shared storage\Tilt Brush\Media Library<br />
*3. If the Quest does not appear, or Android File Transfer is not able to access it, deactivate Developer Mode:<br />
** With Oculus Connect App on iPhone/Android, login with Interaction Station Account (ask at the Station)<br />
** Select the right Oculus Device<br />
** More Settings -> Developer mode -> disable<br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Tilt_Brush&diff=6579Tilt Brush2021-04-21T20:30:58Z<p>Evolutie: </p>
<hr />
<div>Tilt Brush lets you paint in 3D space with virtual reality.<br />
<br />
[https://www.tiltbrush.com/ Tilt Brush]<br />
<br />
=== How to import media ===<br />
<br />
==== On a PC ====<br />
1. Using File Explorer go to the Tilt Brush Media Library ( Documents\Tilt Brush\Media Library ).<br><br />
2. Put the file you're importing into the correct folder. (e.g. Images folder for images)<br><br />
3. In Tilt Brush you can find your files via ... more options -> labs -> Local Media Library<br><br />
<br />
[[File:Tips 8-2b.png | 600px]]<br />
<br />
==== On an Oculus Quest ====<br />
1. Connect the headset to the computer using a USB cable<br><br />
2. Go to the File Explorer on the PC. On a Mac, use Android File Transfer.<br><br />
3. Go to the Tilt Brush Media Library ( This PC\Quest\Internal shared storage\Tilt Brush\Media Library ).<br><br />
4. Put the file you're importing into the correct folder. (e.g. images folder for images).<br><br />
5. Disconnect the Quest from the PC.<br><br />
6. Start Tilt Brush and find your files via ... more options -> labs -> Local Media Library (see image above)<br><br />
<br />
[[Category:Virtual Reality]]</div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=Tilt_Brush&diff=6578Tilt Brush2021-04-21T20:30:14Z<p>Evolutie: Created page with "Tilt Brush lets you paint in 3D space with virtual reality. [https://www.tiltbrush.com/ Tilt Brush] === How to import media === ==== On a PC ==== 1. Using File Explorer go..."</p>
<hr />
<div>Tilt Brush lets you paint in 3D space with virtual reality.<br />
<br />
[https://www.tiltbrush.com/ Tilt Brush]<br />
<br />
=== How to import media ===<br />
<br />
==== On a PC ====<br />
1. Using File Explorer go to the Tilt Brush Media Library ( Documents\Tilt Brush\Media Library ).<br><br />
2. Put the file you're importing into the correct folder. (e.g. Images folder for images)<br><br />
3. In Tilt Brush you can find your files via ... more options -> labs -> Local Media Library<br><br />
<br />
[[File:Tips 8-2b.png | 600px]]<br />
<br />
==== On an Oculus Quest ====<br />
1. Connect the headset to the computer using a USB cable<br><br />
2. Go to the File Explorer on the PC. On a Mac, use Android File Transfer.<br><br />
3. Go to the Tilt Brush Media Library ( This PC\Quest\Internal shared storage\Tilt Brush\Media Library ).<br><br />
4. Put the file you're importing into the correct folder. (e.g. images folder for images).<br><br />
5. Disconnect the Quest from the PC.<br><br />
6. Start Tilt Brush and find your files via ... more options -> labs -> Local Media Library (see image above)<br></div>Evolutiehttps://interactionstation.wdka.hro.nl/mediawiki/index.php?title=File:Tips_8-2b.png&diff=6577File:Tips 8-2b.png2021-04-21T20:20:26Z<p>Evolutie: </p>
<hr />
<div></div>Evolutie