├── .gitignore ├── Planning-Assessments.md ├── README.md ├── Week-1.md ├── Week-2.md ├── Week-3.md ├── Week-4.md ├── Week-5-old.md ├── Week-5.md ├── Week-6-1.md ├── Week-6-Exercise-intro-to-image-data-and-tensorflow.ipynb ├── Week-6-treats ├── NumPy_SciPy_Pandas_Quandl_Cheat_Sheet.pdf ├── Numpy_Python_Cheat_Sheet.pdf ├── Python3_reference_cheat_sheet.pdf └── numpy.docx ├── Week-7-notebooks ├── Week-7-CADL-Lecture2.ipynb ├── Week-7-MNIST.ipynb ├── animation.gif ├── tf2_arbitrary_image_stylization.ipynb └── tf_hub_generative_image_module.ipynb ├── Week-7.md ├── Week-8.md ├── hugging-face-summarise.ipynb ├── in-class-exercise-assessments.md ├── information-on-presentation-requirements.pdf ├── python-intro.pdf ├── style_transfer.ipynb ├── types.png ├── week1-code ├── MyFirstClass.png ├── main.cpp.png └── readme.md ├── week2-more-examples ├── Artificial-life │ ├── boid.cpp │ ├── boid.h │ ├── main.cpp │ ├── testApp.cpp │ └── testApp.h ├── Fractals │ ├── Julia │ │ ├── ofApp.cpp │ │ └── ofApp.h │ └── mandel │ │ ├── ofApp.cpp │ │ └── ofApp.h ├── Image-processing │ ├── imageProc.cpp │ ├── imageProc.hpp │ ├── ofApp.cpp │ └── ofApp.h ├── Shaders │ ├── more-shaders │ │ ├── bin │ │ │ └── data │ │ │ │ └── shaders │ │ │ │ ├── 3Dlabs-license.txt │ │ │ │ ├── CarToPol_GLSL.frag │ │ │ │ ├── CarToPol_GLSL.vert │ │ │ │ ├── ChromaAb_GLSL.frag │ │ │ │ ├── ChromaAb_GLSL.vert │ │ │ │ ├── Fisheye_GLSL.frag │ │ │ │ ├── Fisheye_GLSL.vert │ │ │ │ ├── Gauss_GLSL.frag │ │ │ │ ├── Gauss_GLSL.vert │ │ │ │ ├── LumaKey_GLSL.frag │ │ │ │ ├── LumaKey_GLSL.vert │ │ │ │ ├── Rota_GLSL.frag │ │ │ │ ├── Rota_GLSL.vert │ │ │ │ ├── Wobble_GLSL.frag │ │ │ │ ├── Wobble_GLSL.vert │ │ │ │ ├── bloom_GLSL.frag │ │ │ │ ├── bloom_GLSL.vert │ │ │ │ ├── brcosa_GLSL.frag │ │ │ │ ├── brcosa_GLSL.vert │ │ │ │ └── vertexChomAbPassthrough.glsl │ │ └── src │ │ │ ├── main.cpp │ │ │ ├── ofApp.cpp │ │ │ └── ofApp.h │ └── of_shader_example_150 │ │ ├── bin │ │ └── data │ │ │ └── shadersGL3 │ │ │ ├── shader.frag │ │ │ └── shader.vert │ │ └── src │ │ ├── main.cpp │ │ ├── ofApp.cpp │ │ └── ofApp.h ├── openGL │ ├── bump-map │ │ ├── main.cpp │ │ ├── ofApp.cpp │ │ └── ofApp.h │ ├── mesh │ │ ├── ofApp.cpp │ │ └── ofApp.h │ ├── sphereMeshDeform │ │ ├── ofApp.cpp │ │ └── ofApp.h │ ├── surface-normal │ │ ├── main.cpp │ │ ├── ofApp.cpp │ │ └── ofApp.h │ ├── triangles │ │ ├── main.cpp │ │ ├── ofApp.cpp │ │ └── ofApp.h │ └── vertex-normal │ │ ├── main.cpp │ │ ├── ofApp.cpp │ │ └── ofApp.h └── readme.md ├── week2-slides ├── C++ memory and pointers-week-2.pdf ├── pointers_demo_code │ └── main.cpp └── week-2-C++-common-issues.pdf ├── week4-notebooks ├── Bokeh_starter.ipynb ├── Matplotlib example.ipynb ├── Numpy starter.ipynb ├── Untitled.ipynb └── lines.html └── week5-notebooks ├── Neural Nets by hand.ipynb ├── Pixel Pushing in Numpy.ipynb └── Untitled.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .ipynb_checkpoints 3 | 4 | -------------------------------------------------------------------------------- /Planning-Assessments.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | # Assessment : 4 | ## 2020-2021 5 | 6 | Professor Mick Grierson 7 | 8 | ## Introduction 9 | 10 | By now you should all have a good understanding of the various platforms and frameworks you can use to make creative computing projects that feature interactive sounds, images, 3D graphics, web platforms, machine learning, AI and embedded devices. 
You should also have a clear picture of why, how and when you might choose one framework over another. 11 | 12 | Let's think about some theoretical examples and consider what frameworks we should probably choose. 13 | 14 | ### You want to make a real-time interactive installation with 2D or 3D computer graphics and sound, potentially using hardware sensors. 15 | 16 | - You should probably do this in C++, possibly with openFrameworks. You could do it in Unity, but remember: Unity is not the best platform for integrating real-time sensors, and isn't a good platform for sound due to its buffer implementation (it's targeted at playback of sound effects). You could also choose JavaScript, although this won't be as performant as C++, and you would need to negotiate the sensor situation depending on the browser you are using. 17 | 18 | ### You want to create an online, interactive networked environment where people in different places can interact and respond to each other. 19 | 20 | - You should probably do this in JavaScript. You could do it in Python, but it would be harder, and there would not be much benefit. You could also do this in Unity if it's a simple 2D or 3D game. 21 | 22 | ### You want to build audio or video processing software for other artists 23 | 24 | - Do this in C++. You can build great sound toys in JavaScript, but for anything serious, you need to hit the metal. For audio plugins, JUCE is fine and very simple to use once you've tried openFrameworks. 25 | 26 | ### You want to create new artworks using generative deep learning 27 | 28 | - You need to do this in Python. 29 | 30 | ### You want to create a device that can use machine learning 31 | 32 | - You need to use an embedded ARM device that is capable of holding reasonably sized models in memory. Consider something like a Raspberry Pi or similar. 33 | 34 | ### You want to create a game or interactive VR experience 35 | 36 | - You can do this in Unity and it will be great. For better performance, Unreal is also good. Or you can just DIY in OpenGL the way I taught you. It's not hard. 37 | 38 | ### You want to scrape data off the internet and process it as part of a project 39 | 40 | - You need to do this in Python, possibly using BS4 and/or Scrapy for scraping, and gensim for processing. 41 | 42 | ### You want to create an internet of things device of some kind. 43 | 44 | - A Raspberry Pi Zero. 45 | 46 | ## In-class assignment submission. 47 | 48 | ## Instructions - submit only 4 in-class exercises! 49 | 50 | - Choose what you consider to be your 4 best in-class exercises or homeworks that you have completed as part of Coding 2. 51 | - Best if you use git, and follow the instructions on Moodle! 52 | - Submit them in CODE format with an associated README file (one README per exercise), and any associated output image files. You can submit .cpp + .hpp files, IPython notebooks, any other code in text form, and example PNG/JPEGs (images in any format). 53 | 54 | Here are some ideas based on the in-class exercises and homeworks you were asked to complete. Of course, you should have spent some of your own time on these after each class, and so hopefully it won't take you too long to put the submission together. Also, it's fine if you worked with other people on these, but just remember to make sure you are clear about what it is you did.
55 | 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | Professor Mick Grierson 4 | 5 | ## Introduction 6 | 7 | Welcome to Coding 2: Advanced Frameworks. In this course we build on the practical creative programming skills and experience that we developed in Coding 1 and apply them in new contexts. We expand our knowledge of specific programming languages and frameworks so that we can make better choices regarding platforms, software, hardware etc. that reflect creative requirements. We also continue to work with media through programming in ways that are specifically useful in Creative Computing contexts. 8 | 9 | The course is divided in to 2-week blocks that focus on specific areas, exploring them in ways that relate to specific languages, platforms, frameworks and approaches. The course is 'Advanced' in terms of the concepts we will engage with, not necessarily the programming approaches that we will use. We will move fast, so you will need your wits about you. You are encouraged to take written notes, especially when you are working on your own to review material covered in class. 10 | 11 | 12 | ## Schedule: 13 | 14 | The schedule is divided into four 2-week blocks that focus on specific technologies. 15 | 16 | Week 1 & 2 - C++ 17 | 18 | - Introduction to C++ fundamentals: main.cpp, #include, printing to the console, data types, conditionals, loops, functions, preprocessing and compilation. 19 | - Creating and using C++ objects: classes, .h (hpp) .cpp pairs, declaring and defining classes, basic macros. 20 | - Getting started with openFrameworks 21 | - Understanding Pointers 22 | 23 | Week 3 & 4 - More C++ and Embedded development 24 | - More on pointers 25 | - Object orientation, inheritence, polymorphism 26 | - Pointers in objects and how you can use them 27 | - Using ARM architectures for embedded systems 28 | - Running OF on ARM 29 | 30 | Week 5 & 6 - Python 31 | 32 | - Getting started with Python : Python 2 vs 3, printing to the console, import, variables, conditionals, loops, functions, def 33 | - Doing the Python Challenge!!! 34 | - Using help() and DIR() 35 | - Core libs : matplotlib, numpy, pandas, urllib, bs4, gensim, bokeh, flask. 36 | - NLP tools in gensim. 37 | 38 | Week 7 & 8 - Python Machine Learning 39 | - Introduction to Image Processing, Batch processing and Data Handling 40 | - Basic Neural Networks by hand - Forward multiply, Forward add, backward pass, calculating derivatives and gradients, numerical gradient, analytic gradient, scaling the gradient to automatically adjust parameters. Back propagation for training Neural Networks. 41 | - Introduction to Tensorflow 42 | 43 | Week 9 - Project work 44 | 45 | # Assessment 46 | Assessment is by creative project (70%), and completion of in-class assignments (30%). 
47 | 48 | ## Useful resources 49 | 50 | - C plus plus documentation 51 | https://www.cplusplus.com 52 | 53 | - openframeworks.cc 54 | 55 | https://openframeworks.cc 56 | 57 | - Python 58 | 59 | https://www.python.org/ 60 | 61 | https://www.anaconda.com/distribution/ 62 | 63 | https://www.tensorflow.org/ 64 | 65 | - ML cheatsheet 66 | 67 | https://ml-cheatsheet.readthedocs.io/en/latest/ 68 | 69 | - Maths / Programming Cheat Sheet 70 | 71 | https://github.com/Jam3/math-as-code 72 | 73 | -------------------------------------------------------------------------------- /Week-1.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | ## 2019-2020 4 | 5 | Professor Mick Grierson 6 | 7 | # Week 1 - Introduction to C++ 8 | 9 | This week we will be learning some fundamentals of C++. We won't be trying to become the world's greatest C++ programmers in one day. Instead, we'll be learning the simplest possible subset of C++ approaches that we will need in order to get things done. 10 | 11 | C++ is about the most complex language there is. It's also probably the best language you can master in terms of speed, power and flexibility. However, you can do almost everything you need to do in C++ by following a basic C-style coding approach that is common in lots of languages including JavaScript, and GLSL, which you will know reasonably well by now. If you take this approach, then it's simply a matter of learning a couple of fundamentals, and then slowly developing your vocabulary over time. 12 | 13 | Fundamentally, the rule of thumb here is don't try to understand everything all at once - just focus on the essentials, and worry about the complexities once you feel confident with the basics. 14 | 15 | ## What we will cover this session 16 | 17 | - Getting up and running with an Integrated Development Environment 18 | - Understanding the basics of working with C++ as a language 19 | 20 | This is a **CRASH COURSE** but should not be too hard given how much time we have spent learning JavaScript and GLSL, which have lots of similarities. 21 | 22 | If you want a nice reference for today's session, you should of course be looking here: 23 | 24 | https://www.w3schools.com/cpp/default.asp 25 | 26 | The things I'll be introducing you to today are as follows: 27 | 28 | - preprocessing and compilation 29 | - main.cpp 30 | - #include 31 | - printing to the console 32 | - data types 33 | - conditionals 34 | - loops 35 | - functions 36 | - C++ objects / classes 37 | - declaring and defining a class 38 | - .h (hpp) .cpp pairs 39 | - basic macros 40 | - including and using classes in a project 41 | 42 | 43 | # Part One 44 | 45 | ## You need to get set up with an Integrated Development Environment (IDE). 46 | 47 | There are a couple of options. 48 | 49 | ### MacOS 50 | 51 | - Xcode (Download from the macOS App store) 52 | 53 | The current version of Xcode will require you to use the most recent macOS operating system. If you really don't want to update your computer, you can sign up for a free apple developer account and download a version that works on your operating system by exploring the available links here: 54 | 55 | https://developer.apple.com/download/more/ 56 | 57 | - MS Visual Studio (Download from the visual studio website here 58 | 59 | ### Windows 60 | 61 | - https://visualstudio.microsoft.com/ 62 | 63 | This is what you should use if you're on windows. 
64 | 65 | The current version of Visual Studio claims to allow you to compile C++ code on macOS, which is technically true, but it's not as easy to do this as you might think. 66 | 67 | ## Linux 68 | 69 | You can do everything you need to on the command line using GCC, G++ and MAKE. We will be exploring this later in the course but just make sure your linux distribution has the latest GCC and everything will be fine. 70 | 71 | ## Why do I need an IDE? 72 | 73 | - OK so the real answer to this question is, you don't really need it. 74 | - But it certainly makes life easier. 75 | 76 | - A very basic IDE for C++ usually comprises a text editor, hinting / linting, a preprocessor, a compiler and debugging tools. 77 | - Let's talk a little bit about each of these so you're all clear what they can do for you. Some of you will know this stuff, but just in case, let's remind ourselves. 78 | 79 | * Linting is where a program can analyse your code and spot where it's going to fail. 80 | * Hinting is where the IDE tells you about errors and offer solutions 81 | - You will have noticed these features on mimicproject.com and elsewhere 82 | * The Preprocessor takes all the files in your project and sticks them together in to one big file which it then passes to the compiler 83 | - You can give the preprocessor some 'directives', i.e. actions, like including other files such as standard libraries for maths, logging, or whatever. 84 | * The compiler turns your code into binary form, and pulls off lots of tricks on the way. 85 | - The more time you spend working with C++, the more you will care about these tricks. But right now, all you need to know is that this is how you create a program that you can run. 86 | 87 | - You can of course use any text editor, and any compiler without using an IDE. It's totally your call. However, it will make life harder for you if you're trying to understand various frameworks, as these will expect you to be using an IDE such as those already mentioned. 88 | 89 | # Part Two 90 | 91 | SO what are these things? 92 | 93 | - main.cpp 94 | 95 | main.cpp is the most important file in your project. 96 | If main.cpp doesn't exist or contains errors, your program won't compile - it's the first file that the compiler will go looking for (more on this later). 97 | 98 | However, it's also simultaneously not that important. Most beginner programmers use main.cpp to do everything. This is fine. As you get more advanced, you might start to pass control over to other files or objects as part of your program structure. 99 | 100 | - #include 101 | 102 | This is a preprocessor directive which is used to load a class, or bunch of classes, or library (such as a standard library). C++ has lots of standard libraries such as iostream which allows you to get stuff in and out of your program. You can think of it as an equivalent to adding a script with a script tag, or using an import statement in JavaScript. 103 | 104 | - namespaces 105 | 106 | Namespaces are ways of separating standard functions into groups, and prevents functions with the same name from clashing. It's not common for programmers to make their own namespaces. There are lots of commonly used ones, the most important being the standard namespace. If you want to use lots of standard features, you can write 'using namespace std'. Some people will tell you never to do this, and just to make sure you never have two things with the same name, or any name that might be being used by something you need. 
This is actually **pretty good advice**. 107 | 108 | - printing to the console 109 | 110 | In C++ the most common way to print to the console is by using 'cout'. 'cout' is part of the standard library and is in the standard namespace, and has a bit of a strange syntax which we're going to look at later. If you are using the standard namespace you can just write 'cout'. If not, you need to prefix it with std::, which is shorthand for the standard namespace. This is what we're going to do most of the time. 111 | 112 | - data types 113 | 114 | OK so in JavaScript world, we only have var, let and const, which can be more or less anything, including floating point data, objects with lots of data, arrays or whatever. 115 | 116 | In C++ this is not the case. You need to specify which type of data your variable is going to hold, just like in GLSL. I love this. 117 | 118 | Types of data include int, short, char, string, float, double, long, long double. Here's a useful table to show you the difference. 119 | 120 | ![data types](https://github.com/ual-cci/MSc-Coding-2/blob/master/types.png) 121 | 122 | - conditionals 123 | 124 | You know what these are! 125 | 126 | - loops 127 | 128 | You know what these are too! 129 | 130 | - functions 131 | 132 | And these! 133 | 134 | - C++ objects / classes 135 | 136 | You also know what these are. We've used them a lot in JavaScript, and these days this is very similar to how they exist in C++. But some of you might not have made your own much. However, they are super easy and we're going to go through them. 137 | 138 | - .h (hpp) .cpp pairs 139 | 140 | Now like most things in C++, I could talk about this for a long time, but the method I am going to teach you is a good first method for structuring and adding files that contain your own objects in ways that makes it very easy for other people to understand and re-use your code. We'll go through this in the lecture 141 | 142 | - including and using classes in a project 143 | 144 | This can be tricky if your compiler can't find your files. Sometimes, your compiler can't find your files, even if they are in your project. This means you need to know how to specifically tell the compiler where to find your files. We'll discuss this in more detail next week as it will come up a lot. Once you understand the problem, it will be easy enough for you to solve it! 145 | 146 | - basic macros 147 | 148 | Macros allow you to define things at a very high level, including constants, and even functions. They are messages directly to the compiler which means they can execute in ways that are compiler specific, for example you can use a macro to define code that is only relevant if you're on windows, or specifically for mobile platforms. You can also just use them to define PI if you want. I'll show you some examples in the lecture 149 | 150 | # Part Three : Practical Lecture 151 | 152 | In the lecture, I'll talk you through the above in detail, providing practical code examples of each element where appropriate. In general, this is mainly about translating what you know from working in JavaScript and C into C++, and it's fairly straightforward. More details in the lecture! 153 | 154 | # Homework 155 | 156 | All you need to do in the homework is copy what I have done in the lecture 157 | 158 | - Download and set up an IDE on your local machine. 159 | - Create a test project and design a simple program that prints a sequence of 10 values using a function. 160 | - Create a new .cpp file and header (.hpp). 
Use this to make a new class of object, and then use this class in your main program. 161 | - Refer throughout to the C++ tutorial on w3c schools - and consider reading in full. It's well written, shouldn't take long and most of it you will already know : https://www.w3schools.com/cpp/ 162 | 163 | 164 | ## OpenFrameworks 165 | 166 | Next week, we're going to take a look at openFrameworks, which provides lots of functionality for you to explore whilst you settle in to your new C++ chops. 167 | 168 | openframeworks.cc 169 | -------------------------------------------------------------------------------- /Week-2.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | ## 2020-2021 4 | 5 | Professor Mick Grierson 6 | 7 | ## Week 2 - C++ and openFrameworks 8 | 9 | This week we will be looking at openFrameworks, making sure we understand how to set it up, and exploring basic examples and concepts. We'll also be looking at some more complex examples, such as the Maximilian C++ library for audio, and in particular thinking about what pointers are and when we might consider using them. 10 | 11 | openFrameworks has been around for a long time, and is a great way of creating C++ projects for media and computational arts. As a result, it's a standard Framework in industry for developing media work. It has a large community, some of whom are very active at CCI. There are comprehensive tutorials online, and a massive library of addons (some of which might not work, so be warned). But in general, if you are trying to do something in C++ and you don't want to deal with the hassle of setting up your own graphics, sound and interaction environment for prototyping, it's a great way to get started. 12 | 13 | # REVIEW OF LAST SESSION : 14 | 15 | Last week we looked at the basics of C++. Let's refresh our memory of what that included: 16 | 17 | - How to get set up using an IDE like Xcode or Visual Studio. 18 | You should all now have an IDE setup and working on your system. This is essential for this class, so if you haven't managed to do this, you need to spend as much time as you can doing this today. We will work together to help each other if and when there are problems! 19 | 20 | - Preprocessing and compilation 21 | 22 | We learned that the pre-processor has to copy and paste the contents of all the files in your C++ project in to one big file before it can build your program. To help it do that, we need to use preprocessor *directives* or instructions which tell it which files we want to use. Sometimes those files are standard libraries and functions (like or ), and sometimes they are our own files (e.g. our own headers (*.h or *.hpp *and* their *.cpp partners - remember, they come in pairs), and we use #include statements to tell the preprocesser we want them. We also need to use directives (like #ifndef) to make sure that the preprocessor only copies each file in once, as it's very common for it to be included lots of times all over the place. 23 | 24 | We also learned that the compiler takes the preprocessed output and converts it into machine code - a binary file that you can run - using methods that are totally specific to your processor family (e.g. Intel, ARM, etc.). We also learned that this makes super fast code that is lots of fun and hugely powerful, especially when you need quick access to large amounts of memory (like video). 
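To make those directives concrete, here is a minimal sketch of a header/implementation pair wrapped in an include guard. The class name and members are purely illustrative, not taken from any project in this repo:

```cpp
// counter.h -- the declaration, protected by an include guard
#ifndef COUNTER_H
#define COUNTER_H

class Counter {
public:
    void add(int amount);   // declared here...
    int total() const;
private:
    int sum = 0;
};

#endif

// counter.cpp -- ...and defined here, in the matching .cpp file
#include "counter.h"

void Counter::add(int amount) { sum += amount; }
int Counter::total() const { return sum; }
```

Any other file that wants a Counter just writes `#include "counter.h"`; the `#ifndef`/`#define` pair means the preprocessor only pastes the declaration in once, however many files ask for it.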
25 | 26 | - main.cpp 27 | 28 | We learned that main.cpp is usually where our main function goes, and that without a main function, the compiler won't be able to build your binary. We also learned that you can just create an object here, and that this object can be your actual program. So main could be huge, or it could be really small depending on the structure of your program. 29 | 30 | - printing to the console 31 | 32 | We learned that these days, most people use std::cout to print to the console. There are other ways but whatever. 33 | 34 | - data types 35 | 36 | We looked at how data types need to be specified. We also looked at typecasting (converting data from one type to another), and data truncation (when you lose 37 | information resolution as a result of converting / casting data from one type to another). 38 | 39 | - Conditionals, Loops, Functions 40 | 41 | This was basically super easy and you should be fairly comfortable with it. If not, practice. 42 | 43 | - C++ objects / classes, declaring and defining a class in .h (hpp) .cpp pairs 44 | 45 | We created some basic objects in C++. First we did this in main.cpp, then we moved this into a header file (.h / .hpp) and a .cpp file pair. 46 | 47 | We learned that you *declare (say what they are)* the variables and functions for a class (the *member variables* and *member functions*) in the *header*, and *define (write the actual function code)* for the member functions in the *.cpp* file. 48 | 49 | We also learned that when you want to include files in your project, they need to be associated with your project target, and the compiler needs to be able to find them. This is going to come up as an issue for you at some point and you will be able to fix it so don't worry too much. 50 | 51 | - We also looked at MACROS, which are ways of defining stuff at the top of a file. For example, we could #define PI 3.14159 and then PI would also mean that. We also looked at how 52 | 53 | 54 | # THIS SESSION 55 | 56 | ## PART ONE: INTRO 57 | 58 | I'm going to talk you through downloading and using openFrameworks. There are some key things that you need to be aware of before you start so we're going to go through these first. 59 | 60 | - Getting openFrameworks 61 | 62 | https://openframeworks.cc - download the latest release or grab the nightly build? 63 | 64 | - OF folder structure. Where is everything and why? 65 | 66 | You need to know the structure of the openFrameworks folders. Let's take a look. 67 | 68 | - build settings for your platform (SDKs, paths) 69 | 70 | What on earth are Build settings? 71 | How do I check the build settings? 72 | What stuff is important? 73 | How are paths and how do I check them? 74 | 75 | - openFrameworks project structure (main, ofApp) 76 | 77 | What is the structure of an OF APP? How do I make changes, add methods etc.? How can I keep things clean and tidy? 78 | 79 | - Creating and using an empty project 80 | 81 | Should I copy the example project, or use the project generator? 82 | 83 | - Documentation and examples 84 | 85 | So what can I do? 86 | 87 | - Addons and how to use them 88 | 89 | https://ofxaddons.com/categories 90 | 91 | What on earth are addons? 92 | 93 | Good things and bad things about various ofxAddons contributions. 94 | 95 | - Useful examples based on last term's course material - image processing, fractals, custom geometry and shaders in OF 96 | 97 | 98 | ## PART TWO: WORKSHOP 99 | 100 | Let's build creative software together!!!!! 
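If it helps to have it in front of you during the workshop, this is roughly the skeleton the project generator gives you, sketched from memory — exact function names vary a little between openFrameworks versions (for example, older releases draw circles with `ofCircle` rather than `ofDrawCircle`):

```cpp
// main.cpp -- hands control straight to the app object
#include "ofMain.h"
#include "ofApp.h"

int main() {
    ofSetupOpenGL(1024, 768, OF_WINDOW);  // window size and mode
    ofRunApp(new ofApp());                // OF owns the app and calls setup/update/draw for us
}

// ofApp.h -- declare the methods OF expects to find
#pragma once
#include "ofMain.h"

class ofApp : public ofBaseApp {
public:
    void setup();
    void update();
    void draw();
private:
    float angle = 0;
};

// ofApp.cpp -- define them
#include "ofApp.h"

void ofApp::setup()  { ofBackground(0); }
void ofApp::update() { angle += 0.02f; }
void ofApp::draw()   {
    ofSetColor(255);
    ofDrawCircle(ofGetWidth() * 0.5 + 200 * sin(angle), ofGetHeight() * 0.5, 20);
}
```

Everything you add to your app hangs off this pattern: new member variables go in the header, new behaviour goes in setup/update/draw (or your own member functions) in the .cpp file.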
101 | 102 | 103 | ## PART THREE: Additional material 104 | 105 | - Common mistakes in openFrameworks 106 | 107 | Where everyone gets it wrong. 108 | 109 | - ofxMaxim example 110 | 111 | How to use ofxMaxim in openFrameworks for creating sound and music systems. Good news, it's super easy. 112 | 113 | - Pointers and Arrays, the Stack and the Heap 114 | 115 | What on earth are pointers and why should I know about them? 116 | 117 | - address-of operators and dereference operators 118 | 119 | - New and Delete, Malloc and Free, Reference Counting == no. 120 | 121 | - How and why ofxMaxim uses pointers - a quick overview of the maxiSample object 122 | 123 | 124 | # HOMEWORK : Porting a simple project from JavaScript to OF 125 | 126 | Find a project from last term that you feel good about or want to do more work on. 127 | Pick a simple one unless you really fancy a challenge 128 | Convert this project to openFrameworks. 129 | There are some useful hints for how to do this here, including some familiar and not so familiar techniques : https://github.com/ual-cci/MSc-Coding-2/tree/master/week2-more-examples 130 | -------------------------------------------------------------------------------- /Week-3.md: -------------------------------------------------------------------------------- 1 | 2 | # Session 3 3 | 4 | ## C++ Objects - Encapsulation, inheritance and polymorphism. 5 | 6 | - Today we are going to look at more advanced Object Orientation approaches. 7 | 8 | - Specifically we are going to look at how we can inherit properties from another class by treating it as a parent class 9 | - We'll also look at polymorphism and what this really means, which frankly is not all that much 10 | - We will then look at Access specifiers (public, private, protected), and how we use encapsulation with them. 11 | 12 | 13 | ## But First - A Review of last week's session 14 | 15 | - I'll take you through some of the demos and how to use them :-D 16 | 17 | - Also, we went through a list of common mistakes using C++ and openFrameworks. This stuff is super important. You can find all the information you need on these very important tips right here: 18 | https://github.com/ual-cci/MSc-Coding-2/blob/master/week2-slides/week-2-C%2B%2B-common-issues.pdf 19 | 20 | Let's go through some of this again as it's a very useful refresher!! 21 | 22 | - We also did an intro to addons, and looked at ofxMaxim. Let's take another look at how this works. 23 | 24 | - Finally we learned about pointers. I want to quickly review the pointer slides - this stuff is very hard to remember so don't worry, it's much more important that you feel comfortable looking it up!! 25 | 26 | https://github.com/ual-cci/MSc-Coding-2/blob/master/week2-slides/C%2B%2B%20memory%20and%20pointers-week-2.pdf 27 | 28 | # Let's do a homework review!! 29 | - Everyone had to create a basic openFrameworks app by adapting something simple that they did last term. Hopefully you've all managed to spend the time you need to get this done. It is really simple, but also important. As I said to Phoenix, if all you managed to do was put a picture of a kitten up on a screen, then this is a triumph and you should be proud. If you did more than this, then that's great. 30 | 31 | BREAK 32 | 33 | # Object Orientation - a primer 34 | 35 | ## Inheritance 36 | - One useful feature of C++ objects is that we can create objects substantially based on other objects 37 | - We can treat any class as a base class. This means we don't need to copy all the code from one class to another. 
Some people like to work this way as it means you write less code. 38 | - However, it can also make your code harder to understand as many people can't be bothered to check how the base class actually works, and this often breaks things. 39 | - But don't worry about this. Just remember, that in your own projects, it will definitely save you time to only write certain code once. 40 | - A simple example is in games, where your different agents (e.g. game characters) all need to move around a game world in the same way. 41 | - In that case, it's great to have a class called 'character', and then inherit a hole bunch of code for each character. 42 | - like this ```class myCharacter : public character``` 43 | - In this case, myCharacter is a derived (child) class, and will inherit all the code from the base (parent) class 'character'. 44 | - You can inherit from more than one class by listing your classes, separated by commas 45 | - ```class myCharacter : public character, public rainbow``` 46 | - So this time, this character also has all the propertyies of a rainbow :-) 47 | - You can inherit from a base class that is derived from another class :-) 48 | 49 | ## Polymorphism 50 | - Polymorphism is super easy to understand 51 | - All it means is that if you have a function in your base (parent) class, you can have it behave differently in your derived (child) class. 52 | - For example, this means your characters can all inherit a 'playSound()' function from your base class, but you can make them play different sounds if you like 53 | - Of course, this whole process calls in to question the entire reason behind inheriting functions to make code more reusable, but don't think about that. No, seriously, it's not worth bringing it up. Nobody wants to hear that. Nor do they want to hear about how it's possibly pointless because of virtual functions. Just leave it ok? 54 | 55 | ## Access Specifiers 56 | - Previously I asked you to just make everything public, because otherwise you wouldn't have been able to call any of your member functions or variables from outside your class. 57 | - In general, this is fine when you're starting out. However, it's considered insecure. 58 | - The problem is that, as I demonstrated previously, when you try to access a variable that is private **outside the class**, you get an error. 59 | - This would also happen if you inherit something private. 60 | - To get round this, it's usual to use something called 'encapsulation'. 61 | - All this means is you write a public function that allows you to set or get the value of a private variable. e.g. ```getMyPrivateVar() { return myPrivateVar; }``` 62 | - Hmmm. 63 | - Hmmmm. 64 | - You can also declare something as 'protected'. This means you can access it from a derived class. 65 | 66 | # Using A-Life to think about objects 67 | 68 | - Let's explore something that has a class structure so that we know what we're doing. 69 | - In order to do this, I'm going to put together one of the examples I gave you last week, and that I hope some of you have taken the time to explore as it's a lot of fun, and a good example of how a class can be useful 70 | 71 | ## Let's look at a simple artificial life example 72 | 73 | - In this example, we create a flock of virtual birds 74 | - This is an algorithm that is primarily based on the work of Craig Reynolds 75 | https://www.youtube.com/watch?v=86iQiV3-3IA - How does this work? 
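Before we get to the boid code, here is a compact, self-contained sketch pulling together the inheritance, polymorphism and access-specifier ideas above. It uses the illustrative 'character' example from earlier (none of this is real project code), and the `virtual` keyword the text hints at:

```cpp
#include <iostream>

// base (parent) class: write the shared code once
class character {
public:
    virtual void playSound() { std::cout << "generic footsteps\n"; } // virtual lets children override it
    float getSpeed() { return speed; }       // encapsulation: a public getter for a non-public member
    void setSpeed(float s) { speed = s; }
    bool isAlive() { return health > 0; }
protected:
    float speed = 1.0f;                      // protected: derived classes may use this directly
private:
    float health = 100.0f;                   // private: only character itself can touch this
};

// derived (child) class: inherits everything above and overrides one function
class myCharacter : public character {
public:
    void playSound() override { std::cout << "squelchy footsteps\n"; } // polymorphism in action
};

int main() {
    myCharacter m;
    m.setSpeed(2.0f);
    m.playSound();                           // prints the overridden version
    std::cout << m.getSpeed() << " " << m.isAlive() << "\n";
    return 0;
}
```

The boid class below follows exactly the same pattern: private member variables, public getters and setters, and member functions that do the actual work.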
76 | 77 | ## How does this work 78 | - Each 'boid' is an instance of the same class 79 | - These are all sitting in an array, doing whatever they do 80 | - They have simple set of instructions that governs there behaviour 81 | 82 | ## Separation, Cohesion and Alignment 83 | - There are three basic functions - separation, cohesion and alignment 84 | - Separation : if a boid gets to within a certain threshold of another boid, move away somehow. So perhaps you can get the unit vector and use this to send it in the opposite direction 85 | - Cohesion : to prevent a boid getting too far away from all the other boids, move towards the centre of mass of all boids 86 | - Alignment : Steer towards the average heading of all boids 87 | 88 | ## Where did the boids go? 89 | 90 | - To prevent the boids flying offscreen, you can one of a few things 91 | - You can check to see if the current position of each boid is beyond the visible frame, and if so, multiply their direction vector by -1. This inverts the direction, just like we did with the bouncing ball example in week 1 92 | - You could also choose to send subtract the current width and height if they go offscreen. So if they go off the top, they will appear at the bottom and keep going... 93 | 94 | ## Building on an existing example 95 | 96 | - Let's take a look at a boids example and see how it works. This is the header file for the code I gave you a few weeks ago. 97 | 98 | ```#ifndef _BOID 99 | #define _BOID 100 | #include 101 | #include "ofMain.h" 102 | 103 | class Boid 104 | { 105 | // all the methods and variables after the 106 | // private keyword can only be used inside 107 | // the class 108 | private: 109 | ofVec3f position; 110 | ofVec3f velocity; 111 | 112 | float separationWeight; 113 | float cohesionWeight; 114 | float alignmentWeight; 115 | 116 | float separationThreshold; 117 | float neighbourhoodSize; 118 | 119 | ofVec3f separation(std::vector &otherBoids); 120 | ofVec3f cohesion(std::vector &otherBoids); 121 | ofVec3f alignment(std::vector &otherBoids); 122 | 123 | // all the methods and variables after the 124 | // public keyword can only be used by anyone 125 | public: 126 | Boid(); 127 | Boid(ofVec3f &pos, ofVec3f &vel); 128 | 129 | ~Boid(); 130 | 131 | ofVec3f getPosition(); 132 | ofVec3f getVelocity(); 133 | 134 | 135 | float getSeparationWeight(); 136 | float getCohesionWeight(); 137 | float getAlignmentWeight(); 138 | 139 | float getSeparationThreshold(); 140 | float getNeighbourhoodSize(); 141 | 142 | void setSeparationWeight(float f); 143 | void setCohesionWeight(float f); 144 | void setAlignmentWeight(float f); 145 | 146 | void setSeparationThreshold(float f); 147 | void setNeighbourhoodSize(float f); 148 | 149 | void update(std::vector &otherBoids, ofVec3f &min, ofVec3f &max); 150 | 151 | void walls(ofVec3f &min, ofVec3f &max); 152 | 153 | void draw(); 154 | }; 155 | 156 | #endif 157 | ``` 158 | ```/* 159 | * boid.cpp 160 | * boids 161 | * 162 | * Created by Marco Gillies on 05/10/2010. 163 | * Copyright 2010 Goldsmiths, University of London. All rights reserved. 
164 | * 165 | */ 166 | 167 | #include "boid.h" 168 | #include "ofMain.h" 169 | 170 | Boid::Boid() 171 | { 172 | separationWeight = 1.0f; 173 | cohesionWeight = 0.2f; 174 | alignmentWeight = 0.1f; 175 | 176 | separationThreshold = 15; 177 | neighbourhoodSize = 100; 178 | 179 | position = ofVec3f(ofRandom(0, 200), ofRandom(0, 200)); 180 | velocity = ofVec3f(ofRandom(-2, 2), ofRandom(-2, 2)); 181 | } 182 | 183 | Boid::Boid(ofVec3f &pos, ofVec3f &vel) 184 | { 185 | separationWeight = 1.0f; 186 | cohesionWeight = 0.2f; 187 | alignmentWeight = 0.1f; 188 | 189 | separationThreshold = 15; 190 | neighbourhoodSize = 100; 191 | 192 | position = pos; 193 | velocity = vel; 194 | } 195 | 196 | Boid::~Boid() 197 | { 198 | 199 | } 200 | 201 | float Boid::getSeparationWeight() 202 | { 203 | return separationWeight; 204 | } 205 | float Boid::getCohesionWeight() 206 | { 207 | return cohesionWeight; 208 | } 209 | 210 | float Boid::getAlignmentWeight() 211 | { 212 | return alignmentWeight; 213 | } 214 | 215 | 216 | float Boid::getSeparationThreshold() 217 | { 218 | return separationThreshold; 219 | } 220 | 221 | float Boid::getNeighbourhoodSize() 222 | { 223 | return neighbourhoodSize; 224 | } 225 | 226 | 227 | void Boid::setSeparationWeight(float f) 228 | { 229 | separationWeight = f; 230 | } 231 | void Boid::setCohesionWeight(float f) 232 | { 233 | cohesionWeight = f; 234 | } 235 | 236 | void Boid::setAlignmentWeight(float f) 237 | { 238 | alignmentWeight = f; 239 | } 240 | 241 | 242 | void Boid::setSeparationThreshold(float f) 243 | { 244 | separationThreshold = f; 245 | } 246 | 247 | void Boid::setNeighbourhoodSize(float f) 248 | { 249 | neighbourhoodSize = f; 250 | } 251 | 252 | 253 | ofVec3f Boid::getPosition() 254 | { 255 | return position; 256 | } 257 | 258 | ofVec3f Boid::getVelocity() 259 | { 260 | return velocity; 261 | } 262 | 263 | ofVec3f Boid::separation(std::vector &otherBoids) 264 | { 265 | // finds the first collision and avoids that 266 | // should probably find the nearest one 267 | // can you figure out how to do that? 
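    // (One way, sketched only in comments so the code below is unchanged: keep track of the
    //  smallest distance seen so far and which boid it belongs to, e.g.
    //      float best = separationThreshold; int nearest = -1;
    //      ...inside the loop: if (d < best) { best = d; nearest = i; }
    //  then build the avoidance vector from otherBoids[nearest] after the loop.
    //  Note also that if no boid is inside separationThreshold, this function reaches the
    //  end without returning anything, so returning ofVec3f(0, 0, 0) in that case would
    //  make the behaviour well defined.)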
268 | for (int i = 0; i < otherBoids.size(); i++) 269 | { 270 | if(position.distance(otherBoids[i]->getPosition()) < separationThreshold) 271 | { 272 | ofVec3f v = position - otherBoids[i]->getPosition(); 273 | v.normalize(); 274 | return v; 275 | } 276 | } 277 | } 278 | 279 | ofVec3f Boid::cohesion(std::vector &otherBoids) 280 | { 281 | ofVec3f average(0,0,0); 282 | int count = 0; 283 | for (int i = 0; i < otherBoids.size(); i++) 284 | { 285 | if (position.distance(otherBoids[i]->getPosition()) < neighbourhoodSize) 286 | { 287 | average += otherBoids[i]->getPosition(); 288 | count += 1; 289 | } 290 | } 291 | average /= count; 292 | ofVec3f v = average - position; 293 | v.normalize(); 294 | return v; 295 | } 296 | 297 | ofVec3f Boid::alignment(std::vector &otherBoids) 298 | { 299 | ofVec3f average(0,0,0); 300 | int count = 0; 301 | for (int i = 0; i < otherBoids.size(); i++) 302 | { 303 | if (position.distance(otherBoids[i]->getPosition()) < neighbourhoodSize) 304 | { 305 | average += otherBoids[i]->getVelocity(); 306 | count += 1; 307 | } 308 | } 309 | average /= count; 310 | ofVec3f v = average - velocity; 311 | v.normalize(); 312 | return v; 313 | } 314 | 315 | void Boid::update(std::vector &otherBoids, ofVec3f &min, ofVec3f &max) 316 | { 317 | velocity += separationWeight*separation(otherBoids); 318 | velocity += cohesionWeight*cohesion(otherBoids); 319 | velocity += alignmentWeight*alignment(otherBoids); 320 | 321 | walls(min, max); 322 | position += velocity; 323 | } 324 | 325 | void Boid::walls(ofVec3f &min, ofVec3f &max) 326 | { 327 | if (position.x < min.x){ 328 | position.x = min.x; 329 | velocity.x *= -1; 330 | } else if (position.x > max.x){ 331 | position.x = max.x; 332 | velocity.x *= -1; 333 | } 334 | 335 | if (position.y < min.y){ 336 | position.y = min.y; 337 | velocity.y *= -1; 338 | } else if (position.y > max.y){ 339 | position.y = max.y; 340 | velocity.y *= -1; 341 | } 342 | 343 | 344 | } 345 | 346 | void Boid::draw() 347 | { 348 | ofSetColor(0, 255, 255); 349 | ofCircle(position.x, position.y, 5); 350 | } 351 | ``` 352 | 353 | # Homework 354 | 355 | ## Exercise 1 : Inheritance and polymorphism 356 | 357 | - Working in groups, get the boids project up and running 358 | - Create a new derived (child) class based on the boids class. 359 | - Using the smallest amount of code you can, change the appearance of the new class 360 | - Now create a new vector of your new boids in the project 361 | 362 | ## Exercise 2 : More complex inheritance 363 | 364 | - Working in groups or on your own, try to get your two types of boids to pay attention to each other. 365 | - Keep them as two separate flocks, but make it so that each group stays away from each other! 366 | -------------------------------------------------------------------------------- /Week-5-old.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | ## 2020-21 4 | 5 | # Session 5: Introduction to fundamental machine learning concepts with RapidLib 6 | 7 | --- 8 | 9 | 10 | ## Part one: Lecture 11 | 12 | Follow the Lecture, then go through the exercises in class. 13 | 14 | Grab the PDF presentation here: 15 | 16 | - https://github.com/ual-cci/MSc-Coding-1/blob/master/Session-9.pdf 17 | 18 | Start here: 19 | 20 | * https://mimicproject.com/code/8de3cbbe-b7c6-d79f-65fa-42fd1aa43a26 21 | 22 | ## Part Two : Exercises 23 | 24 | Do these exercises individually or in groups, and share your outcomes and observations with each other. 
25 | 26 | ## Exercise 1 27 | 28 | Go through the tutorial as in the lecture here: 29 | 30 | https://mimicproject.com/guides/RAPIDMIX 31 | 32 | In the first step, you'll be asked to include the rapidLib library 33 | 34 | Try entering inputs. Observe the outputs. Explore how the neural network produces results based on those inputs. 35 | 36 | In particular: 37 | 38 | * Try entering values that are higher than any of the values in the training set 39 | * Try entering values that are lower than any of the values in the training set 40 | * What do you notice about the Neural Network's behaviour? 41 | 42 | You can grab the js code for the classification explorer here: 43 | 44 | https://mimic-238710.appspot.com/asset/7addbbfe-b039-713d-f9b9-75a6cd00e923/classification-explorer.js 45 | 46 | 47 | --- 48 | 49 | ## Exercise 2 50 | 51 | Take a look at the classification explorer here: 52 | 53 | https://mimicproject.com/code/7f92bd4e-6d2b-181c-559f-4add766f2095 54 | 55 | In order to input training examples, hold down a number key (e.g. 1) and move the mouse (you may have to click inside the example first to bring it into focus). 56 | 57 | This will input examples of that class as long as you are holding down that number key. The decision boundary will then be displayed. 58 | 59 | Try to choose a set of training examples that will draw the boundary with Class 1 on the left in green and Class 2 on the right in blue. 60 | 61 | Try and make the line as straight as possible between the two classes. 62 | 63 | Now try to make class 2 occupy the lower right quadrant. 64 | 65 | * Have you had to manufacture examples close to the decision boundary to make it fit the shape? 66 | * How might this effect the make-up of your datasets when working on an actual project? 67 | * Will it just consist of representative examples of thing you are modelling? 68 | 69 | 70 | --- 71 | 72 | ## Exercise 3 73 | 74 | Take a look at this example: 75 | 76 | https://mimicproject.com/code/3864f3e5-8263-b70e-5ef9-1037c724d4ec 77 | 78 | This extracts Mel-Frequency Ceptrum Coefficients and uses them as input to a KNN classifier. 79 | 80 | Create a selection of classes and explore how good the system is. 81 | 82 | 83 | --- 84 | 85 | ## Exercise 4 86 | 87 | Take a look at the Regression explorer: 88 | 89 | https://mimicproject.com/code/26ab5507-0d25-07eb-cb03-aaa93883765d 90 | 91 | * In order to input training examples, click onto screen at any point. 92 | 93 | * The X value denotes the input value, whereas the y value denotes the output value. You will then see the regression line drawn as you add values. 94 | 95 | * Try to get a feel for what types of lines are capable and how they’re influenced by the training data. 96 | 97 | * Create a training set that produces a diagonal line from one corner of the canvas to the other. 98 | 99 | * How easy is this to do? What issues do you face? 100 | 101 | You can grab the js code for the regression explorer here. 102 | 103 | https://mimic-238710.appspot.com/asset/26ab5507-0d25-07eb-cb03-aaa93883765d/regression-explorer.js 104 | 105 | --- 106 | 107 | ## Exercise 5 108 | 109 | Now we are going to try and see if we can train a model with 3 outputs to behave consistently. 110 | 111 | We’re going to one single input to control EXACTLY 3 output parameters. 
112 | 113 | Fork the below example: 114 | 115 | https://mimicproject.com/code/5d67faaa-e4c3-771a-f824-fe5c5b978ab6 116 | 117 | This is an example of using a slider as input to control a granular synthesiser (borrowed from Zya) 118 | 119 | Granular synths play lots of small fragments (grains) of a soundfile at various positions. 120 | 121 | * Play around with the two parameters and click on different parts of the waveform to find some sounds you like. 122 | 123 | * When you are ready to record, select the "Record" checkbox. 124 | 125 | * When you are ready to play, select the "Run" checkbox. 126 | 127 | * This will train your model with the recorded dataset and now all 3 synthesiser parameters will be controlled by just the one value from the input slider. 128 | 129 | * Keep recording examples until you can reliably control the output. 130 | 131 | --- 132 | 133 | ## Exercise 6 134 | 135 | Can you take the simple RapdLib example we created at the start and use it to take different inputs, and control different outputs? 136 | 137 | How about using the system for controlling a 3D mesh? 138 | 139 | --- 140 | 141 | ## Further reading 142 | 143 | If you're interested in coding your own Neural Networks, this is a great tutorial : 144 | 145 | https://karpathy.github.io/neuralnets/ 146 | 147 | 148 | # Homework: Using Numpy to compute images 149 | 150 | This homework is to prepare you for next week's session, when we'll be creating simple machine learning systems in Python. 151 | Understanding how Numpy works is extremely useful when trying to do this, so in order to get you up to speed, I've created a little tutorial on how to use Numpy to carry out image processing. 152 | 153 | Follow the tutorial below, and then use it as a starting point to convert one of last term's graphics examples to Python using Numpy. For example, you might choose to create a version of the mandelbrot set, or some other 2D algorithmic graphics example. You might also choose to create a blur effect, or a simple 3D renderer - it's up to you. But - *don't try to create anything interactive!*. It's really not going to work... 154 | 155 | ## Python Numpy Image Processing Tutorial 156 | - You can use numpy arrays to do all the things we did in JavaScript with images 157 | - I don't recommend you do this all the time, but doing it a bit can be instructive 158 | - I've put together a simple tutorial to help you. 159 | 160 | ## Pixel pushing in numpy 161 | - This is just to get you in to the way numpy works 162 | - It will be helpful for getting you up to speed 163 | - Using Tensorflow or Pytorch without knowing numpy first is not great 164 | - Some useful extra reading is available here: 165 | - http://cs231n.github.io/python-numpy-tutorial/ 166 | 167 | ## We can use matplotlib to draw images 168 | 169 | ```Python 170 | from matplotlib import pyplot as plt 171 | 172 | # We're gonna use numpy 173 | 174 | import numpy as np 175 | 176 | # math! 
177 | import math 178 | 179 | # randomly initialise an image 10 * 10 pixels 180 | my_array = np.random.random((10,10)) 181 | 182 | # Also use 'full' or 'ones' or 'zeros' 183 | # y = np.zeros((10,10)) # also try 'full((10,10),0.5)' or 'ones' 184 | 185 | # cmap sets the colourmap - https://matplotlib.org/examples/color/colormaps_reference.html 186 | # cmap is ignored for datasets with rank of 3 or 4, as these are assumed to be RGB(a) 187 | # interpolation is used when the number of pixels in the array 188 | # is such that it can't be shown properly given the size of the plot 189 | # you need to set the lower and upper limits with clim !!! 190 | 191 | plt.imshow(my_array, cmap='gray', clim=(0,1)) 192 | plt.show() 193 | 194 | ``` 195 | ## This is interesting, but how would you go about just colouring one pixel? 196 | 197 | ```Python 198 | myImage = np.zeros((10,10)) 199 | 200 | myImage[5,5]=1 201 | 202 | plt.imshow(myImage, clim=(0,1),cmap="gray") 203 | plt.show() 204 | 205 | ``` 206 | ## Drawing a circle 207 | 208 | ```python 209 | 210 | # This doesn't look great, but it is useful to do 211 | 212 | # so numpy arrays are column major, not row major. 213 | # So you need to do height first 214 | myImage = np.zeros((240,320)) 215 | 216 | # This code is exactly the same as the the code frmo 217 | TWO_PI = 3.14159 * 2 218 | segments = 600 219 | spacing = TWO_PI / segments 220 | size = 40 221 | 222 | for i in range(segments): 223 | x = math.cos(spacing * i) * size 224 | y = math.sin(spacing * i) * size 225 | myImage[math.floor(x) + 50, math.floor(y) + 50] = 1 # notice we need to floor the output to ints 226 | 227 | plt.imshow(myImage, interpolation="bilinear", clim=(0,1),cmap="gray") 228 | plt.show() 229 | 230 | ``` 231 | 232 | ## Drawing a square 233 | 234 | ```python 235 | 236 | myImage = np.zeros((240,320)) 237 | 238 | # This code is exactly the same as the the code from our JS and C++ examples 239 | TWO_PI = 3.14159 * 2 240 | segments = 600 241 | spacing = TWO_PI * 2 / segments 242 | size = 40 243 | centre = 120 244 | 245 | for i in range(240): 246 | for j in range(320): 247 | if abs(centre-i) < size and abs(centre-j) < size: 248 | myImage[i,j]=1 249 | 250 | plt.imshow(myImage, clim=(0,1),cmap="gray") 251 | plt.show() 252 | 253 | ``` 254 | 255 | ## Using a distance field to draw a circle 256 | 257 | ```Python 258 | myImage = np.zeros((240,320)) 259 | 260 | # This code is exactly the same as the the code from our JS and C++ examples 261 | 262 | centre = 100 263 | size = 50 264 | 265 | for i in range(240): 266 | for j in range(320): 267 | x_dist = abs(centre-i) 268 | y_dist = abs(centre-j) 269 | dist = math.sqrt(x_dist * x_dist + y_dist * y_dist) 270 | if dist < size: 271 | myImage[i,j] = 1 272 | 273 | plt.imshow(myImage, clim=(0,1),cmap="gray") 274 | plt.imshow(myImage, interpolation="bilinear", clim=(0,1), cmap="gray") 275 | plt.show() 276 | 277 | ``` 278 | 279 | ## Using numpy to calculate distances in high dimensions 280 | - This is actually a LOT slower than doing it by hand as above 281 | - But is super useful if you are wanting to compute distances 282 | - Particularly if your data has thousands of dimensions 283 | 284 | ```Python 285 | 286 | # This is actually five times slower 287 | # but it is easier to write if you are operating in high dimensions. 288 | 289 | # you could experiment with many more dimensions. Try adding more... 
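# (for example, the same call works unchanged on four-dimensional points:
#      positions = np.array([[30, 40, 10, 5], [50, 60, 20, 8]])
#      np.linalg.norm(positions[0] - positions[1])
#  np.linalg.norm simply returns the Euclidean length of whatever difference vector you give it)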
290 | positions=np.array([[30,40],[50,60]]) 291 | # using built in np function for normalised distance 292 | distance = np.linalg.norm(positions[0:1] - positions[1:2]) 293 | print (distance) 294 | 295 | ``` 296 | 297 | ## Drawing circles using built in numpy distance functions 298 | 299 | ```python 300 | myImage = np.zeros((240,320)) 301 | 302 | # uses numpy distance measures but is lots slower.. 303 | 304 | centre = 100 305 | size = 50 306 | 307 | for i in range(240): 308 | for j in range(320): 309 | positions=np.array([[i,j],[abs(centre-i),abs(centre-j)]]) 310 | distance = np.linalg.norm(positions[0:1] - positions[1:2]) 311 | if distance < size: 312 | myImage[i,j] = 1 313 | 314 | plt.imshow(myImage, clim=(0,1),cmap="gray") 315 | plt.imshow(myImage, interpolation="bilinear", clim=(0,1), cmap="gray") 316 | plt.show() 317 | 318 | ``` 319 | 320 | ## More complex expressions 321 | 322 | ```Python 323 | myImage = np.zeros((240,320)) 324 | 325 | # This code is exactly the same as the the code from our JS and C++ examples 326 | 327 | width_frequency = 3.14159/240 328 | 329 | frequency = width_frequency * 3 330 | 331 | for i in range(240): 332 | for j in range(320): 333 | t = math.sin(math.sqrt(i * i + j * j) * frequency) 334 | myImage[i,j]=t 335 | 336 | plt.imshow(myImage, clim=(0,1),cmap="gray") 337 | plt.imshow(myImage, interpolation="bilinear", clim=(0,1),cmap="gray") 338 | plt.show() 339 | 340 | ``` 341 | 342 | ## More More complex expressions 343 | ```Python 344 | myImage = np.zeros((240,320)) 345 | 346 | # This code is exactly the same as the the code from our JS and C++ examples 347 | 348 | PI = 3.14159 349 | 350 | width_frequency = 3.14159 / 320 351 | 352 | frequency = width_frequency * 5000 353 | 354 | for i in range(240): 355 | for j in range(320): 356 | t = math.tan(i / frequency) * math.cos(j / frequency) + math.atan(j / frequency) * math.cos(i / frequency); 357 | myImage[i,j]=t 358 | 359 | plt.imshow(myImage, clim=(0,1),cmap="gray") 360 | plt.imshow(myImage, interpolation="bilinear", clim=(0,1),cmap="gray") 361 | plt.show() 362 | 363 | ``` 364 | -------------------------------------------------------------------------------- /Week-5.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | Professor Mick Grierson 4 | 5 | ## Week 5 - Introduction to Python 6 | 7 | Over the past three months, I've introduced you primarily only to C-style programming approaches, and to C-style syntax. This has allowed us to get a strong grasp of the five fundamentals of programming - Variables, Conditionals, Loops, Functions, objects - using a syntax that you can more or less stick with. 8 | 9 | Sure, we had to move from var to let, and then to float, int, and then most recently a whole bunch of new types of variables (float, int, vec, long, double, short, bool). But throughout, we stuck with C-style syntax for conditionals (if, else), for loops, function definitions with arguments (across GLSL, C and C++), and then learned the class syntax in C++. 10 | 11 | This approach has worked very well for us so far. It's taken us from JavaScript all the way to low level languages, and we've even thought a bit about memory management. This is all great for performance hungry interactive applications. 12 | 13 | However, some types of computing can be challenging and slow to develop in low-level languages using C-Style syntax. 
In particular, if you are handling lots of text data, or doing other database-style work, C++ code can take longer to write than other, more flexible, less complex languages. 14 | 15 | This is where Python comes in. Python is great for running computations where real-time performance doesn't really matter. For example, if you're trying to analyse and process large amounts of data - be it sounds, images, video, text, or interact with data from various data sources (e.g. websites), produce graphs, or train machine learning models, it can take a lot less time if you do it in Python. So with this in mind, we're going to learn how to program in Python. This is going to be a bit tricky, because it is actually very very different to any of the other languages we've used. Comments are different, Syntax is totally different. Scope is weird. But it's going to be OK. 16 | 17 | But don't try to make interactive systems with video, graphics, audio and live interfaces in Python. It's a really terrible idea. Some people will tell you that it can be done. But they won't be able to tell you why you should, because there's no reason. 18 | 19 | So let's get started. 20 | 21 | ## What we will cover this session 22 | 23 | ### Getting started using Python 24 | 25 | - How to get Python. How to run Python. Different Python Versions. PIP. 26 | 27 | Python is installed by default on macOS. It is a bit more tricky to install on windows. But it's not too difficult. The easiest way to get Python is to use Anaconda. This is what we're going to do. We can install more python software using something called pip. We can also use conda. 28 | 29 | - Different ways of using Python - Using the terminal, using anaconda, using jupyter notebook. 30 | 31 | - On the command line 32 | 33 | You can use the terminal to launch Python and run simple tests. You can also save a series of python commands as a .py file and run it from the command line. You can also run a basic web server using python using SimpleHTTPServer (p2) or http.server (p3) 34 | 35 | - Differences between Python 2 and Python 3 36 | 37 | Print functions 38 | xrange / range 39 | string concatenation methods. 40 | 41 | ## Python programming basics. 42 | * Printing to the console, using quotes, indentation / blocks, string concatenation 43 | * variables : ints, floats, strings, lists, tuples (read-only lists), accessing data in lists, dictionaries 44 | * Basic operators - luckily this stuff is all the same! 45 | * conditionals, loops, functions 46 | * for letter in 'Python': print 'Current Letter :', letter 47 | * keywords, comments 48 | * import 49 | 50 | ## Lecture slides ! 51 | 52 | https://github.com/ual-cci/MSc-Coding-2/blob/master/python-intro.pdf 53 | 54 | ## Exercise - The Python Challenge! 55 | 56 | - The Python Challenge has been on the internet for decades. 57 | - It's a really great way to learn Python. 58 | - Work on your own or in groups to solve the first 5 parts of the Python Challenge. 59 | 60 | http://www.pythonchallenge.com 61 | 62 | ## HOMEWORK 63 | 64 | Working on your own, devise and create a simple riddle or puzzle inspired by the Python Challenge. You can: 65 | 66 | - Hide some information in a section of text 67 | - Or in an Image 68 | - Or in some other form of data 69 | - Then create a simple clue that can help someone else decode in Python. 70 | 71 | Next week, we can share our python riddles! 
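If you are stuck for a starting point, here is one very small sketch of the sort of thing this homework is after — the hidden message and the step size are just placeholders for your own idea:

```python
import random
import string

secret = "meet me at the lab"      # placeholder message -- put your own here
step = 7                           # the clue you will eventually hand to your solver

def junk(n):
    """n random lowercase letters to bury the real characters in."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(n))

# every 7th character of the puzzle is a character of the secret
puzzle = ''.join(junk(step - 1) + ch for ch in secret)
print(puzzle)

# the solver's one-liner, once they have worked out the clue:
print(puzzle[step - 1::step])      # -> "meet me at the lab"
```

The clue you give out might be nothing more than the number 7 — the fun, as with the Python Challenge, is in how obliquely you can point at it.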
72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /Week-6-1.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | Professor Mick Grierson 4 | 5 | ## Week 6 - Introduction to Python Continued 6 | 7 | Last week we did a crash course in Python. This was not as hard as it otherwise might have been as you've already got some solid JavaScript experience, and also spent some time messing around with C++. I'm sure you must still have a lot of questions (for example, how long will it be before I am proficient in these languages? Answer: quite a while), but in general you should at least feel like you understand how variables, conditionals, loops, functions and objects work in Python. To remind you, this is what we looked at: 8 | 9 | * Printing to the console, using quotes, indentation / blocks, string concatenation - in particular how this is different between Python 2 and 3. 10 | * variables : ints, floats, strings, lists, tuples (read-only lists), accessing data in lists, dictionaries. We also looked at using type(). 11 | * Basic operators 12 | * conditionals (if), loops (for), functions (def) 13 | - for letter in 'Python': print 'Current Letter :', letter 14 | * Python keywords, Python comments (#) 15 | * import statements. 16 | 17 | We also did the Python Challenge, which is an excellent mechanism for learning Python. If you haven't had a chance to get to the 5th challenge, that's OK, especially if you have never thought of creating a function that calls itself. I'll talk through the solutions but: 18 | 19 | Hackingnote's solutions for Python Challenge are actually really great, and include solutions that work well in Python 3: 20 | 21 | https://www.hackingnote.com/en/python-challenge-solutions/level-0 22 | 23 | In particular, I recommend that you all have a try at level 7 as it's lots of fun: 24 | 25 | https://www.hackingnote.com/en/python-challenge-solutions/level-7 26 | 27 | I also asked you to all create your own Python challenge riddle / question. Please send these to me! 28 | 29 | 30 | ## What we will cover this session 31 | 32 | This session is about common Python libraries and what they are used for. We will look at some basic examples for a small selection of essential tools that you need to know. We'll be looking at these roughly in order of complexity. However, just because they are complex technologies, doesn't mean they are necessarily hard to use. 33 | 34 | ## But first 35 | 36 | Often, you will come across an example, or a library, and all you will really want to know is what methods and functions are available. 37 | 38 | This is similar to when you are using an IDE, and you want to look up what member functions are available for an object. 39 | 40 | But how can you get more information about a specific python object or command? 41 | 42 | There are two main ways 43 | 44 | * help(something). Try the following: 45 | 46 | help(print), help(range), help(type) 47 | 48 | This returns actual python documentation. This is a more or less fully featured system that is designed to give you good information on what a Python object / function can do. 49 | 50 | * dir(something) 51 | 52 | dir() is very similar to help(), but only returns the attributes that belong to an object that you pass in as an argument. It's not as fully featured as help(), and it can be a bit random what it spits out from object to object, but it tells you what is actually there. 
Sometimes, this is different to what's in the documentation... 53 | 54 | You should be using help() or dir() whenever you're not sure how something works. Don't forget! 55 | 56 | ## Core libraries 57 | 58 | ### matplotlib 59 | 60 | https://matplotlib.org/ 61 | 62 | matplotlib is a library for plotting data using an approach similar to that which can be found in the popular research software Matlab. It is designed to allow you to create plots that are publication quality, but in general, it's just a great tool for seeing what you are doing. 63 | 64 | ```python 65 | # this is a bit weird and easy to forget. 66 | # here we are importing the pyplot functions as plt. 67 | # we're also importing math so we can do some trig. 68 | 69 | import matplotlib.pyplot as plt 70 | import math 71 | 72 | x = range(100) 73 | y = [] 74 | 75 | for value in x: 76 | y.append(math.sin(value * 0.1)) 77 | 78 | plt.plot(y) 79 | 80 | ``` 81 | There are lots of important core plotting features, including bar charts, pie charts, scatter plots etc. Take a look: 82 | 83 | https://matplotlib.org/gallery/index.html 84 | 85 | ### Numpy 86 | 87 | https://numpy.org/ 88 | 89 | Numpy is one of the most powerful and important Python packages. It is excellent for handling multidimensional arrays - e.g. large blocks of data - and has some impressive built in functions for doing vector processing and linear algebra. In general, if you are wanting to process large blocks of numbers, you should be using Numpy. 90 | 91 | Numpy arrays are much more powerful that Python lists. 92 | They allow you to create and manipulate arrays of information, such as large blocks of image data, and process it quickly. 93 | 94 | Quick intro to Numpy: 95 | 96 | ```python 97 | import numpy as np 98 | 99 | # creates an empty 1D array with 100 elements 100 | i = np.zeros([100]) 101 | 102 | # creates an empty 3D array with 5 * 5 * 5 elements 103 | x = np.zeros([5,5,5]) 104 | 105 | # creates a multidimensional array 3 * 2 by 2 blocks 106 | y = np.zeros([2,2]*3) 107 | 108 | print ("the shape of this array is ", np.shape(i)) 109 | 110 | print (i) 111 | 112 | print ("the shape of this array is ", np.shape(x)) 113 | 114 | print (x) 115 | 116 | print ("the shape of this array is ", np.shape(y)) 117 | 118 | print(y) 119 | 120 | z = np.arange(100).reshape(2, 5, 10) 121 | 122 | print(z) 123 | 124 | ``` 125 | 126 | https://numpy.org/devdocs/user/quickstart.html 127 | 128 | ### pandas 129 | 130 | To be honest, the main reason people use pandas is because it can read in Microsoft excel files and csv files. This makes it handy for people who naturally use excel to collect and organise data. 131 | 132 | There's a good tutorial on how to import and use excel documents in to Python here: 133 | 134 | https://www.dataquest.io/blog/excel-and-pandas/ 135 | 136 | And this cheatsheet is pretty great. 137 | 138 | https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf 139 | 140 | - https://pandas.pydata.org/docs/index.html 141 | 142 | ### urllib 143 | 144 | https://pythonspot.com/urllib-tutorial-python-3/ 145 | 146 | This is a really essential library for Python that you're going to use a lot. You can do lots of cool things that make scraping data much easier, including specifying your user agent, which basically means pretending to be any browser that you like. 
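For example, here's a minimal sketch of sending a custom User-Agent header - the URL and the browser string below are just placeholders:

```python
import urllib.request

# The URL and the User-Agent string are placeholders - swap in your own.
url = "https://www.example.com"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)"}

# Build a Request object that carries the custom header, then open it as usual.
request = urllib.request.Request(url, headers=headers)
html = urllib.request.urlopen(request).read()
print(html[:200])
```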
147 | 148 | It's super easy to use urllib to grab a webpage : 149 | 150 | ```python 151 | import urllib.request 152 | 153 | html = urllib.request.urlopen('https://www.arts.ac.uk').read() 154 | print(html) 155 | 156 | ``` 157 | 158 | the 'html' variable / object in the above example now has all the data from the web page in it. But parsing HTML is not easy to do at all. Wouldn't it be great if there was some kind of library for parsing HTML easily? That would just be amazing. Oh wait... 159 | 160 | ### bs4 161 | 162 | bs4, or "Beautiful Soup" is a great html parser, and the basis of a very large number of web scraping softwares. If you're building a scraper, you should start with bs4. Here' an example of a script that grabs some webpage data and iterates through it using bs4. 163 | 164 | ```python 165 | # Get all the links from reddit world news. 166 | # Can you spider those links? 167 | 168 | from bs4 import BeautifulSoup 169 | import urllib.request 170 | 171 | html = urllib.request.urlopen('http://www.reddit.com/r/worldnews/').read() 172 | 173 | soup = BeautifulSoup(html) 174 | 175 | # just get all the links. Links are 'a' (as in ) 176 | 177 | for link in soup.find_all('a'): 178 | print(link.get('href')) 179 | 180 | ``` 181 | 182 | ### bokeh 183 | 184 | Bokeh is a great way of creating interactive plots. matplotlib isn't designed for interactive plot generation - it's for generating plots for books and academic papers. Bokeh on the other hand makes it super easy to make a plot that you can interact with on a webpage. Like this : 185 | 186 | ```python 187 | from bokeh.plotting import figure, output_file, show 188 | 189 | # prepare some data 190 | x = [1, 2, 3, 4, 5] 191 | y = [6, 7, 2, 4, 5] 192 | 193 | # output to static HTML file 194 | output_file("lines.html") 195 | 196 | # create a new plot with a title and axis labels 197 | p = figure(title="simple line example", x_axis_label='x', y_axis_label='y') 198 | 199 | # add a line renderer with legend and line thickness 200 | p.line(x, y, legend="Temp.", line_width=2) 201 | 202 | # show the results 203 | show(p) 204 | 205 | ``` 206 | 207 | https://docs.bokeh.org/en/latest/docs/user_guide/quickstart.html#userguide-quickstart 208 | 209 | 210 | ### gensim 211 | 212 | https://radimrehurek.com/gensim/ 213 | 214 | Gensim is a general purpose Topic modelling and Natural Language Processing library with sentiment analysis, word-vectors, and lots of very useful topic modelling toolkits, such as Latent Semantic Analysis (LSA) and Latent Dirichlit Allocation (LDA - https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation). 215 | 216 | It's looking a bit old by modern standards, so you might also want to take a look at the next item in our list! 217 | 218 | ### Transformers 219 | 220 | https://github.com/huggingface/transformers 221 | 222 | Hugging Face's Transformers library is an incredible library of contemporary Natural Language Processing tools. For contemporary autosummarisation, text style transfer, and other quite startling NLP demos, check out the website below. 223 | 224 | https://huggingface.co/models 225 | 226 | For example, you can create a google colab notebook, import huggingface, and do an autosummarisation very easily. Here's some code that gets you started with this: 227 | 228 | ```python 229 | 230 | !pip install transformers 231 | from transformers import pipeline 232 | 233 | # Open and read the article - if using colab, you can upload text to the sample_data folder, or connect your google drive. 
234 | f = open("article.txt", "r", encoding="utf8") 235 | to_tokenize = f.read() 236 | 237 | # Initialize the HuggingFace summarization pipeline 238 | summarizer = pipeline("summarization") 239 | summarized = summarizer(to_tokenize, min_length=75, max_length=300) 240 | 241 | # Print summarized text 242 | print(summarized) 243 | 244 | ``` 245 | 246 | # Exercise 247 | 248 | - Working in groups, build a simple webscraper that scrapes a bunch of documents from the internet and creates a summary of each one. One way to do this is to start with a bunch of pages, and then follow all the links on those pages, scraping new documents as you go. Then you can record each link with a quick summary in a list. 249 | 250 | - You can use any method you like to do the summary - you could just select a couple of sentences, or use some form of summarisation tool (Huggingface has one, Gensim might have one, or you could make one up). 251 | 252 | - If you manage to achieve this, extract keywords by counting the total number of similar words from all the different documents and see if any are more popular than others. Search for documents that contain those keywords using Python and then summarise those documents too! 253 | 254 | # Homework - take a look at the notebook in this repository called Week-6-Exercise-intro-to-image-data-and-tensorflow. :-) 255 | -------------------------------------------------------------------------------- /Week-6-treats/NumPy_SciPy_Pandas_Quandl_Cheat_Sheet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/Week-6-treats/NumPy_SciPy_Pandas_Quandl_Cheat_Sheet.pdf -------------------------------------------------------------------------------- /Week-6-treats/Numpy_Python_Cheat_Sheet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/Week-6-treats/Numpy_Python_Cheat_Sheet.pdf -------------------------------------------------------------------------------- /Week-6-treats/Python3_reference_cheat_sheet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/Week-6-treats/Python3_reference_cheat_sheet.pdf -------------------------------------------------------------------------------- /Week-6-treats/numpy.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/Week-6-treats/numpy.docx -------------------------------------------------------------------------------- /Week-7-notebooks/Week-7-MNIST.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from __future__ import absolute_import, division, print_function, unicode_literals\n", 10 | "import tensorflow as tf" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "#Once you have run this on your own system, you can try it on colab here:\n", 20 | "\n", 21 | "# https://www.tensorflow.org/tutorials/quickstart/beginner" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | 
"execution_count": 3, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "mnist = tf.keras.datasets.mnist\n", 31 | "\n", 32 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n", 33 | "x_train, x_test = x_train / 255.0, x_test / 255.0\n" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 4, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "model = tf.keras.models.Sequential([\n", 43 | " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", 44 | " tf.keras.layers.Dense(128, activation='relu'),\n", 45 | " tf.keras.layers.Dropout(0.2),\n", 46 | " tf.keras.layers.Dense(10)\n", 47 | "])\n" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 5, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "data": { 57 | "text/plain": [ 58 | "array([[-0.19896734, 0.1846184 , 0.6152543 , 0.98006797, 0.06164537,\n", 59 | " -0.9810164 , 0.13198064, -0.02699777, 0.5831794 , 0.4314483 ]],\n", 60 | " dtype=float32)" 61 | ] 62 | }, 63 | "execution_count": 5, 64 | "metadata": {}, 65 | "output_type": "execute_result" 66 | } 67 | ], 68 | "source": [ 69 | "predictions = model(x_train[:1]).numpy()\n", 70 | "predictions" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 6, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "data": { 80 | "text/plain": [ 81 | "array([[0.06106548, 0.08961587, 0.1378503 , 0.19853829, 0.07924619,\n", 82 | " 0.02793548, 0.08502069, 0.07252391, 0.13349892, 0.11470485]],\n", 83 | " dtype=float32)" 84 | ] 85 | }, 86 | "execution_count": 6, 87 | "metadata": {}, 88 | "output_type": "execute_result" 89 | } 90 | ], 91 | "source": [ 92 | "tf.nn.softmax(predictions).numpy()\n" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 7, 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 8, 107 | "metadata": {}, 108 | "outputs": [ 109 | { 110 | "data": { 111 | "text/plain": [ 112 | "3.5778577" 113 | ] 114 | }, 115 | "execution_count": 8, 116 | "metadata": {}, 117 | "output_type": "execute_result" 118 | } 119 | ], 120 | "source": [ 121 | "loss_fn(y_train[:1], predictions).numpy()\n" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": 9, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "model.compile(optimizer='adam',\n", 131 | " loss=loss_fn,\n", 132 | " metrics=['accuracy'])\n" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 10, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "name": "stdout", 142 | "output_type": "stream", 143 | "text": [ 144 | "Epoch 1/5\n", 145 | "1875/1875 [==============================] - 2s 758us/step - loss: 0.4745 - accuracy: 0.8643\n", 146 | "Epoch 2/5\n", 147 | "1875/1875 [==============================] - 1s 724us/step - loss: 0.1497 - accuracy: 0.9561\n", 148 | "Epoch 3/5\n", 149 | "1875/1875 [==============================] - 1s 705us/step - loss: 0.1113 - accuracy: 0.9669\n", 150 | "Epoch 4/5\n", 151 | "1875/1875 [==============================] - 1s 713us/step - loss: 0.0859 - accuracy: 0.9727\n", 152 | "Epoch 5/5\n", 153 | "1875/1875 [==============================] - 2s 851us/step - loss: 0.0731 - accuracy: 0.9777\n" 154 | ] 155 | }, 156 | { 157 | "data": { 158 | "text/plain": [ 159 | "" 160 | ] 161 | }, 162 | "execution_count": 10, 163 | "metadata": {}, 164 | "output_type": "execute_result" 165 | } 166 | ], 167 | 
"source": [ 168 | "model.fit(x_train, y_train, epochs=5)\n" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 13, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "probability_model = tf.keras.Sequential([\n", 178 | " model,\n", 179 | " tf.keras.layers.Softmax()\n", 180 | "])\n" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": 26, 186 | "metadata": {}, 187 | "outputs": [ 188 | { 189 | "data": { 190 | "text/plain": [ 191 | "" 207 | ] 208 | }, 209 | "execution_count": 26, 210 | "metadata": {}, 211 | "output_type": "execute_result" 212 | } 213 | ], 214 | "source": [ 215 | "probability_model(x_test[:5])\n", 216 | "\n" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": null, 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [] 225 | } 226 | ], 227 | "metadata": { 228 | "kernelspec": { 229 | "display_name": "Python 3", 230 | "language": "python", 231 | "name": "python3" 232 | }, 233 | "language_info": { 234 | "codemirror_mode": { 235 | "name": "ipython", 236 | "version": 3 237 | }, 238 | "file_extension": ".py", 239 | "mimetype": "text/x-python", 240 | "name": "python", 241 | "nbconvert_exporter": "python", 242 | "pygments_lexer": "ipython3", 243 | "version": "3.8.5" 244 | } 245 | }, 246 | "nbformat": 4, 247 | "nbformat_minor": 2 248 | } 249 | -------------------------------------------------------------------------------- /Week-7-notebooks/animation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/Week-7-notebooks/animation.gif -------------------------------------------------------------------------------- /Week-8.md: -------------------------------------------------------------------------------- 1 | # Coding 2 : Advanced Frameworks 2 | 3 | # Week 8: Using deep neural networks to classify and generate images 4 | 5 | Professor Mick Grierson 6 | 7 | ## Part one : Recap 8 | 9 | Last session we focussed on trying to understand what a simple machine learning system was, and how you could build a very basic one. We learned a lot of things. Some of the most important things we went through were as follows: 10 | 11 | - We tend to use Neural Networks to try to transform data without us having to change parameters manually. 12 | - For example, we might want to build a system that can automatically change input values in order to meet some condition, e.g. making an output value get higher. 13 | - We can do this by measuring the distance between the output and the desired output, then passing a value derived from this distance back through the network's parameters. 14 | 15 | - This works with single input systems, but is even more powerful if we can have more inputs, so we chain them together 16 | - We do all this by adjusting the inputs by a small amount, and getting the derivative, which is the difference between input and output for different parameters of the system, scaled by a small amount (we move slowly towards the target..) 17 | 18 | ![Gradient Descent](https://upload.wikimedia.org/wikipedia/commons/a/a3/Gradient_descent.gif) 19 | 20 | - When we get all the derivatives together, we get a gradient. This can be thought of as containing direction. 21 | 22 | - We use the gradient to adjust the parameters of the network. The difference propagates back through the network. 
23 | - Input data flows forward through nodes in a graph - this is the forward pass 24 | - We calculate the gradient and use this to adjust the network parameters - this is the backward pass. 25 | - We call this "Back Propagation." 26 | - We can add and squash the output of a bunch of Neurons together with an activation function, such as a sigmoid. 27 | - We can use this to turn a Neuron into a smoothed-switch with a maximum of 1 and a minimum of 0. 28 | 29 | # Last week's exercises 30 | 31 | - I asked you to go through an introductory notebook that shows you how to do basic signal processing on a batch of images. 32 | - I then asked you to try to do some type of transformation on the image dataset (something you devised yourself). 33 | - I then asked you to modify the notebook so that you ran the process on a totally different batch of images that you sourced yourself. 34 | - Getting a dataset is one of the most important, and also simultaneously most annoying aspects of machine learning. 35 | 36 | 37 | 38 | ## This session 39 | 40 | - Today we are going to do three things 41 | - First, we are going to look at how large ensembles of neurons can classify an image, and how we can structure such systems 42 | - Second, we are going to look at a few interesting Creative uses of these types of systems 43 | - Finally, we're going to learn more by exploring a Jupyter notebook from Parag Mital's Creative Applications of Deep Learning course, which allows us to build simple networks for the first time. 44 | 45 | # Part Two - Using deep learning to classify images. 46 | 47 | - Take a look at this demo here from Terence Broad: 48 | - https://blog.terencebroad.com/archive/convnetvis/vis.html 49 | - This is a simple visualisation of a real CNN classifying a handwritten image 50 | - The input image flows through layers of a trained network 51 | - The 'paths' through the different layers show how different aspects of the image are classified by different layers by the network. 52 | - Let's actually explore a simple program that actually trains this system. 53 | 54 | # Exercise 1 55 | 56 | In groups we're going to do an exercise looking at this MNIST notebook here: 57 | 58 | https://github.com/ual-cci/MSc-Coding-2/blob/master/Week-7-notebooks/Week-7-MNIST.ipynb 59 | 60 | Try to understand what each line is doing with reference to what we described in the lecture. Remember, this is creating a large network layer by layer. 61 | 62 | ## Exercise 2 : Image Fun with tensorflow 63 | 64 | - You will have noticed that you can run the MNIST training demo on Google's colab platform. 65 | 66 | This is useful if you can't get notebooks to work, or are having issues using tensorflow. 67 | 68 | https://colab.research.google.com/notebooks/intro.ipynb 69 | 70 | - Colab can be useful if you are wanting to try things out quickly 71 | - However, it's not necessarily very good if you are trying to do complex things that require long periods of training 72 | - But you really should try it out! 73 | 74 | Lots of people are very excited about using deep learning to generate images. 75 | Two popular techniques are style transfer and using generative image models, such as Generative Adversarial Networks (GANs). 76 | - Have a look at the following demos from Google on Style Transfer and GANs. You will be learning more about these next term so these are just to whet your appetite: 77 | - You will need to install tensorflow hub to use these demos. 
From the terminal: 78 | ```pip install tensorflow_hub``` 79 | - https://github.com/ual-cci/MSc-Coding-2/blob/master/Week-7-notebooks/tf2_arbitrary_image_stylization.ipynb 80 | - https://github.com/ual-cci/MSc-Coding-2/blob/master/Week-7-notebooks/tf_hub_generative_image_module.ipynb 81 | - Use your own images to create your own style transfer and GAN outputs if you can. 82 | - Check out some of the other examples: 83 | https://www.tensorflow.org/hub/tutorials 84 | 85 | # Part Three : Lecture 86 | 87 | ## How neural networks are structured: 88 | 89 | - Different NN systems have different structures 90 | - There are many different features these structures 91 | - One good example of such a system is a what is called 92 | - a "Fully connected network" 93 | 94 | ## Fully Connected networks 95 | - It's super easy to understand a fully connected network 96 | - All this means is that every node in one layer is connected to every node in the next layer 97 | - This makes a massive web of connections 98 | 99 | ## What are the layers doing? 100 | - We start with a bunch of inputs at the first layer, one for each pixel, for example. 101 | - We might want to downsample the image first, and convert it to greyscale. We don't need colour to work out what number it is. 102 | - Then we want to have a bunch of outputs. In the example of the MNIST handwritten digit classification, there are 10 outputs for 10 numbers (0-9). 103 | - Then, we have what are called 'hidden layers' in-between. 104 | - They are 'hidden' because we don't monitor their inputs or outputs directly in normal circumstances. 105 | 106 | ## Seriously, what do the layers do? 107 | 108 | - The input layers and hidden layers divide up the task of working out what the image representation needs to be. 109 | - They do this literally by adding up the values from different parts of the image. 110 | - This is one reason why the dataset is normalised in terms of size and orientation. 111 | - However this isn't always necessary. Just mostly necessary! 112 | 113 | ## No but really... 114 | - First layers might work out some of the edges 115 | - The hidden layers then spread out the job of representing different aspects of the image input. These are basically different blocks of pixels - just like in Terence's demo. 116 | - This leads to the output layer receiving a collection of activiations which it can use to determine what the input image is. 117 | 118 | ## Weights, Biases, Activation 119 | - A trained network is really just a really big grid of weights (values). 120 | - These weights have been arrived at through the training process, and just tell you what the values of the pixels are for a particular part of the image 121 | - The training process simply compares the outputs to the input, works out the difference, and propagates this back to adjust the weights a tiny amount in the direction represented by the gradient. 122 | - Also, you might want to make it harder for the network to get activated. This is useful to help make sure the nodes in the network only activate when there's a strong response. 123 | - To do this, you can add a bias - usually a negative number - before you pass the output through a sigmoid or rectified linear unit activation function (RELU) 124 | - RELU is loads better than sigmoid because it's simple. 125 | - RELU is just converts the neuron's combined sum of inputs by weights, with bias, into a diagonal line from zero. Parag mentions it in his second week session, further down this page. 126 | 127 | ## SO... 
128 | 129 | - Each neuron takes all the inputs from the previous layer 130 | - It then spits out a number between 0 and 1 that is a combination of all the high or low activation in the prior layer pushed in to a function. 131 | - This is all just a simple function, that is part of a larger function - the NN is just a function approximator made up of lots of other functions. 132 | - Here is a surprisingly good explanation that is pretty easy to grasp: 133 | 134 | https://www.youtube.com/watch?v=aircAruvnKk 135 | 136 | # Homework 137 | - Now that we've looked in detail at the whole process 138 | - You should be able to tackle this notebook from Parag Mital. It shows you how to design a Neural Network to generate a new image from scratch based on a separate image. It also reinforces some of the learning we've been through and adds further detail. 139 | 140 | https://github.com/ual-cci/MSc-Coding-2/blob/master/Week-7-notebooks/Week-7-CADL-Lecture2.ipynb 141 | 142 | - 143 | -------------------------------------------------------------------------------- /hugging-face-summarise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "c2de273d", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stderr", 11 | "output_type": "stream", 12 | "text": [ 13 | "/opt/homebrew/Caskroom/miniforge/base/envs/tf/lib/python3.8/site-packages/torch/_tensor.py:575: UserWarning: floor_divide is deprecated, and will be removed in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values.\n", 14 | "To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). 
(Triggered internally at /tmp/pip-req-build-jhk0fkj8/aten/src/ATen/native/BinaryOps.cpp:467.)\n", 15 | " return torch.floor_divide(self, other)\n" 16 | ] 17 | } 18 | ], 19 | "source": [ 20 | "from transformers import pipeline\n", 21 | "\n", 22 | "# Open and read the article\n", 23 | "f = open(\"article.txt\", \"r\", encoding=\"utf8\")\n", 24 | "to_tokenize = f.read()\n", 25 | "\n", 26 | "# Initialize the HuggingFace summarization pipeline\n", 27 | "summarizer = pipeline(\"summarization\")\n", 28 | "summarized = summarizer(to_tokenize, min_length=75, max_length=300)\n", 29 | "\n", 30 | "# Print summarized text\n", 31 | "print(summarized)" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "c8c8cba3", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "id": "a1d8769e", 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [] 49 | } 50 | ], 51 | "metadata": { 52 | "kernelspec": { 53 | "display_name": "Python 3 (ipykernel)", 54 | "language": "python", 55 | "name": "python3" 56 | }, 57 | "language_info": { 58 | "codemirror_mode": { 59 | "name": "ipython", 60 | "version": 3 61 | }, 62 | "file_extension": ".py", 63 | "mimetype": "text/x-python", 64 | "name": "python", 65 | "nbconvert_exporter": "python", 66 | "pygments_lexer": "ipython3", 67 | "version": "3.8.10" 68 | } 69 | }, 70 | "nbformat": 4, 71 | "nbformat_minor": 5 72 | } 73 | -------------------------------------------------------------------------------- /in-class-exercise-assessments.md: -------------------------------------------------------------------------------- 1 | 2 | # MSc Coding 2 - In-class assignment submission. 3 | 4 | ## Instructions - submit only 4 in-class exercises! 5 | 6 | - Choose what you consider to be your 4 best in-class exercises that you have completed as part of Coding 2. 7 | - Submit them in CODE format with an associated README file (one README per exercise), and any associated output image files. You can submit .cpp + .hpp files, iPython notebooks, any other code in text form, and example PNG/JPEGS (images in any format). 8 | - For each of the 4 exercises, your readme should include a 250 word description of what you did, what you feel you learned, and most importantly, how the code should be run. 9 | - Submit your 4 in-class assignments via MOODLE alongside the 4 README files in a zip file, with legible filenames. 10 | - If you have git repos for your projects, please simply include a link to these in your zip file. 11 | 12 | Here are details of the in-class exercises you were asked to complete. Of course, you should have spent some of your own time on these after each class, and so hopefully it won't take you too long to put the submission together. 13 | 14 | ## Week 2 Exercise - Your first OF project 15 | 16 | - Select a JavaScript project you completed last term and port it to C++ using openFrameworks 17 | 18 | ## Week 3 Exercise - The Python Challenge! 19 | 20 | - http://www.pythonchallenge.com 21 | 22 | - Submit your python challenge solutions to the first 7 challenges 23 | 24 | ## Week 4 Exercise - Python webscraper 25 | 26 | - Build a simple webscraper that scrapes a set of documents from the internet and summarises them using gensim. 27 | - If you manage to achieve this, extract keywords from all the different documents and see if any are more popular than others. 28 | - Search for documents that contain those keywords using Python and then summarise those documents too. 
29 | 30 | ## Week 5 Exercise - Neural Networks by hand 31 | 32 | - In week 5 we created a simple toy neuron by hand 33 | - This should have left you with enough information to create a single later of neurons 34 | - If you managed to do this, you may submit this as one of your in-class assignments. 35 | 36 | ## Week 6 Exercise - Signal Processing in Numpy and Tensorflow 37 | 38 | - NOTEBOOK: https://github.com/ual-cci/MSc-Coding-2/blob/master/Week-6-Exercise-intro-to-image-data-and-tensorflow.ipynb 39 | - Make a version of the Notebook with at least one major difference that you have introduced yourself (as follows): 40 | - First, you must do some transformation on the image dataset that isn't included in the above document. You must use numpy to do this transformation. 41 | - If you manage to do this, your next task is to collect and process your own dataset instead of the one provided. 42 | 43 | ## Week 7 Exercise - Using TensorFlow to create outputs 44 | 45 | - Exercise 2 46 | - Have a look at the following demos: 47 | - https://research.google.com/seedbank/seed/neural_style_transfer_with_tfkeras 48 | - https://research.google.com/seedbank/seed/deepdream 49 | - Use your own images to create your own style transfer and deep dream outputs. 50 | - Submit your image outputs. If you make any changes to the code, submit your own version of the notebooks, highlighting the changes you made using comments. 51 | - You should also try to get the code running on your own machine. If you do this, submit the code that runs on your own device. 52 | 53 | ## Week 8 Exercise 54 | 55 | - Submit a port of any of the above exercises that will run on the Raspberry PI 4. 56 | -------------------------------------------------------------------------------- /information-on-presentation-requirements.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/information-on-presentation-requirements.pdf -------------------------------------------------------------------------------- /python-intro.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/python-intro.pdf -------------------------------------------------------------------------------- /types.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/types.png -------------------------------------------------------------------------------- /week1-code/MyFirstClass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/week1-code/MyFirstClass.png -------------------------------------------------------------------------------- /week1-code/main.cpp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/week1-code/main.cpp.png -------------------------------------------------------------------------------- /week1-code/readme.md: -------------------------------------------------------------------------------- 1 | # What is this? 2 | 3 | Well, here I've left some screenshots of the example we created in class. 
4 | 5 | # But Mick, this is github. Why didn't you post the code? 6 | 7 | Because I want you to type it in yourself. 8 | 9 | -------------------------------------------------------------------------------- /week2-more-examples/Artificial-life/boid.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * boid.cpp 3 | * boids 4 | * 5 | * Created by Marco Gillies on 05/10/2010. 6 | * Copyright 2010 Goldsmiths, University of London. All rights reserved. 7 | * 8 | */ 9 | 10 | #include "boid.h" 11 | #include "ofMain.h" 12 | 13 | Boid::Boid() 14 | { 15 | separationWeight = 1.0f; 16 | cohesionWeight = 0.2f; 17 | alignmentWeight = 0.1f; 18 | 19 | separationThreshold = 15; 20 | neighbourhoodSize = 100; 21 | 22 | position = ofVec3f(ofRandom(0, 200), ofRandom(0, 200)); 23 | velocity = ofVec3f(ofRandom(-2, 2), ofRandom(-2, 2)); 24 | } 25 | 26 | Boid::Boid(ofVec3f &pos, ofVec3f &vel) 27 | { 28 | separationWeight = 1.0f; 29 | cohesionWeight = 0.2f; 30 | alignmentWeight = 0.1f; 31 | 32 | separationThreshold = 15; 33 | neighbourhoodSize = 100; 34 | 35 | position = pos; 36 | velocity = vel; 37 | } 38 | 39 | Boid::~Boid() 40 | { 41 | 42 | } 43 | 44 | float Boid::getSeparationWeight() 45 | { 46 | return separationWeight; 47 | } 48 | float Boid::getCohesionWeight() 49 | { 50 | return cohesionWeight; 51 | } 52 | 53 | float Boid::getAlignmentWeight() 54 | { 55 | return alignmentWeight; 56 | } 57 | 58 | 59 | float Boid::getSeparationThreshold() 60 | { 61 | return separationThreshold; 62 | } 63 | 64 | float Boid::getNeighbourhoodSize() 65 | { 66 | return neighbourhoodSize; 67 | } 68 | 69 | 70 | void Boid::setSeparationWeight(float f) 71 | { 72 | separationWeight = f; 73 | } 74 | void Boid::setCohesionWeight(float f) 75 | { 76 | cohesionWeight = f; 77 | } 78 | 79 | void Boid::setAlignmentWeight(float f) 80 | { 81 | alignmentWeight = f; 82 | } 83 | 84 | 85 | void Boid::setSeparationThreshold(float f) 86 | { 87 | separationThreshold = f; 88 | } 89 | 90 | void Boid::setNeighbourhoodSize(float f) 91 | { 92 | neighbourhoodSize = f; 93 | } 94 | 95 | 96 | ofVec3f Boid::getPosition() 97 | { 98 | return position; 99 | } 100 | 101 | ofVec3f Boid::getVelocity() 102 | { 103 | return velocity; 104 | } 105 | 106 | ofVec3f Boid::separation(std::vector &otherBoids) 107 | { 108 | // finds the first collision and avoids that 109 | // should probably find the nearest one 110 | // can you figure out how to do that? 
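    // (Note: as written, if no other boid is closer than separationThreshold,
    //  execution falls off the end of this function without returning a value,
    //  which is undefined behaviour in C++. A safer sketch would return a zero
    //  vector, e.g. ofVec3f(0, 0, 0), after the loop.)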
111 | for (int i = 0; i < otherBoids.size(); i++) 112 | { 113 | if(position.distance(otherBoids[i]->getPosition()) < separationThreshold) 114 | { 115 | ofVec3f v = position - otherBoids[i]->getPosition(); 116 | v.normalize(); 117 | return v; 118 | } 119 | } 120 | } 121 | 122 | ofVec3f Boid::cohesion(std::vector &otherBoids) 123 | { 124 | ofVec3f average(0,0,0); 125 | int count = 0; 126 | for (int i = 0; i < otherBoids.size(); i++) 127 | { 128 | if (position.distance(otherBoids[i]->getPosition()) < neighbourhoodSize) 129 | { 130 | average += otherBoids[i]->getPosition(); 131 | count += 1; 132 | } 133 | } 134 | average /= count; 135 | ofVec3f v = average - position; 136 | v.normalize(); 137 | return v; 138 | } 139 | 140 | ofVec3f Boid::alignment(std::vector &otherBoids) 141 | { 142 | ofVec3f average(0,0,0); 143 | int count = 0; 144 | for (int i = 0; i < otherBoids.size(); i++) 145 | { 146 | if (position.distance(otherBoids[i]->getPosition()) < neighbourhoodSize) 147 | { 148 | average += otherBoids[i]->getVelocity(); 149 | count += 1; 150 | } 151 | } 152 | average /= count; 153 | ofVec3f v = average - velocity; 154 | v.normalize(); 155 | return v; 156 | } 157 | 158 | void Boid::update(std::vector &otherBoids, ofVec3f &min, ofVec3f &max) 159 | { 160 | velocity += separationWeight*separation(otherBoids); 161 | velocity += cohesionWeight*cohesion(otherBoids); 162 | velocity += alignmentWeight*alignment(otherBoids); 163 | 164 | walls(min, max); 165 | position += velocity; 166 | } 167 | 168 | void Boid::walls(ofVec3f &min, ofVec3f &max) 169 | { 170 | if (position.x < min.x){ 171 | position.x = min.x; 172 | velocity.x *= -1; 173 | } else if (position.x > max.x){ 174 | position.x = max.x; 175 | velocity.x *= -1; 176 | } 177 | 178 | if (position.y < min.y){ 179 | position.y = min.y; 180 | velocity.y *= -1; 181 | } else if (position.y > max.y){ 182 | position.y = max.y; 183 | velocity.y *= -1; 184 | } 185 | 186 | 187 | } 188 | 189 | void Boid::draw() 190 | { 191 | ofSetColor(0, 255, 255); 192 | ofCircle(position.x, position.y, 5); 193 | } 194 | -------------------------------------------------------------------------------- /week2-more-examples/Artificial-life/boid.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | * 4 | */ 5 | 6 | #ifndef _BOID 7 | #define _BOID 8 | #include 9 | #include "ofMain.h" 10 | 11 | class Boid 12 | { 13 | // all the methods and variables after the 14 | // private keyword can only be used inside 15 | // the class 16 | private: 17 | ofVec3f position; 18 | ofVec3f velocity; 19 | 20 | float separationWeight; 21 | float cohesionWeight; 22 | float alignmentWeight; 23 | 24 | float separationThreshold; 25 | float neighbourhoodSize; 26 | 27 | ofVec3f separation(std::vector &otherBoids); 28 | ofVec3f cohesion(std::vector &otherBoids); 29 | ofVec3f alignment(std::vector &otherBoids); 30 | 31 | // all the methods and variables after the 32 | // public keyword can only be used by anyone 33 | public: 34 | Boid(); 35 | Boid(ofVec3f &pos, ofVec3f &vel); 36 | 37 | ~Boid(); 38 | 39 | ofVec3f getPosition(); 40 | ofVec3f getVelocity(); 41 | 42 | 43 | float getSeparationWeight(); 44 | float getCohesionWeight(); 45 | float getAlignmentWeight(); 46 | 47 | float getSeparationThreshold(); 48 | float getNeighbourhoodSize(); 49 | 50 | void setSeparationWeight(float f); 51 | void setCohesionWeight(float f); 52 | void setAlignmentWeight(float f); 53 | 54 | void setSeparationThreshold(float f); 55 | void setNeighbourhoodSize(float f); 56 | 57 | void 
update(std::vector &otherBoids, ofVec3f &min, ofVec3f &max); 58 | 59 | void walls(ofVec3f &min, ofVec3f &max); 60 | 61 | void draw(); 62 | }; 63 | 64 | #endif 65 | -------------------------------------------------------------------------------- /week2-more-examples/Artificial-life/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "testApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofSetupOpenGL(1024,768, OF_WINDOW); // <-------- setup the GL context 8 | 9 | // this kicks off the running of my app 10 | // can be OF_WINDOW or OF_FULLSCREEN 11 | // pass in width and height too: 12 | ofRunApp( new testApp()); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /week2-more-examples/Artificial-life/testApp.cpp: -------------------------------------------------------------------------------- 1 | #include "testApp.h" 2 | 3 | testApp::~testApp() 4 | { 5 | for (int i = 0; i < boids.size(); i++) 6 | { 7 | delete boids[i]; 8 | } 9 | } 10 | 11 | //-------------------------------------------------------------- 12 | void testApp::setup(){ 13 | 14 | 15 | int screenW = ofGetScreenWidth(); 16 | int screenH = ofGetScreenHeight(); 17 | 18 | ofBackground(0,50,50); 19 | 20 | // set up the boids 21 | for (int i = 0; i < 50; i++) 22 | boids.push_back(new Boid()); 23 | 24 | } 25 | 26 | 27 | //-------------------------------------------------------------- 28 | void testApp::update(){ 29 | 30 | ofVec3f min(0, 0); 31 | ofVec3f max(ofGetWidth(), ofGetHeight()); 32 | for (int i = 0; i < boids.size(); i++) 33 | { 34 | boids[i]->update(boids, min, max); 35 | } 36 | } 37 | 38 | //-------------------------------------------------------------- 39 | void testApp::draw(){ 40 | 41 | for (int i = 0; i < boids.size(); i++) 42 | { 43 | boids[i]->draw(); 44 | } 45 | 46 | } 47 | 48 | 49 | //-------------------------------------------------------------- 50 | void testApp::keyPressed(int key){ 51 | 52 | } 53 | 54 | //-------------------------------------------------------------- 55 | void testApp::keyReleased(int key){ 56 | 57 | } 58 | 59 | //-------------------------------------------------------------- 60 | void testApp::mouseMoved(int x, int y ){ 61 | 62 | } 63 | 64 | //-------------------------------------------------------------- 65 | void testApp::mouseDragged(int x, int y, int button){ 66 | 67 | } 68 | 69 | //-------------------------------------------------------------- 70 | void testApp::mousePressed(int x, int y, int button){ 71 | 72 | } 73 | 74 | //-------------------------------------------------------------- 75 | void testApp::mouseReleased(int x, int y, int button){ 76 | 77 | } 78 | 79 | //-------------------------------------------------------------- 80 | void testApp::windowResized(int w, int h){ 81 | 82 | } 83 | -------------------------------------------------------------------------------- /week2-more-examples/Artificial-life/testApp.h: -------------------------------------------------------------------------------- 1 | #ifndef _TEST_APP 2 | #define _TEST_APP 3 | 4 | 5 | #include "ofMain.h" 6 | #include 7 | #include "boid.h" 8 | 9 | class testApp : public ofBaseApp{ 10 | 11 | public: 12 | ~testApp(); 13 | 14 | void setup(); 15 | void update(); 16 | void draw(); 17 | 18 | void keyPressed(int key); 19 | void keyReleased(int key); 20 | void mouseMoved(int x, int y ); 21 | void mouseDragged(int x, int y, int button); 22 | void 
mousePressed(int x, int y, int button); 23 | void mouseReleased(int x, int y, int button); 24 | void windowResized(int w, int h); 25 | 26 | std::vector boids; 27 | 28 | }; 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /week2-more-examples/Fractals/Julia/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup(){ 5 | 6 | z,zx,zy=0;//So these are variables that hold the absolute value of z, and the x and y coordinates that we use to represent z 7 | cx,cy,x,y=0;//These are variables we use to represent the complex number c - which is a pixel location. 8 | maxIterations=20;//We calculate the fractal by running a test over and over again for each pixel. 9 | res=2; 10 | 11 | 12 | } 13 | 14 | //-------------------------------------------------------------- 15 | void ofApp::update(){ 16 | 17 | } 18 | 19 | //-------------------------------------------------------------- 20 | void ofApp::draw(){ 21 | ofTranslate(600, 450); 22 | //We are going to test every single pixel on the screen, so we need nested for loops. 23 | for (int i=-600;i<600;i+=res) {//for every pixel in the x dimension (columns)... 24 | for (int j=-450;j<450;j+=res) {//...run through all the y pixels 25 | zx=(float)i/300.0;//this is going to create a Julia set 26 | zy=(float)j/225.0; 27 | cx=(mouseX/300.0)-2.0;//adjust the values of x so that it is between -2 and 2, as this is where the mandelbrot is. Otherwise we'd be searching around for ages. 28 | cy=(mouseY/225.0)-2.0;//same for y. These two lines have given us the complex number c, which is just (cx,cy) 29 | for (int test=0;testthresh) { 18 | 19 | pixOut[i]=pixIn[i]; 20 | 21 | } else { 22 | 23 | pixOut[i]=0; 24 | 25 | } 26 | } 27 | 28 | return pixOut; 29 | } 30 | 31 | ofPixels imageProc::iThreshRBG(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, int thresh) { 32 | 33 | for (int i =0;i thresh) { 81 | 82 | pixOut[i]=pixOut[i+1]=pixOut[i+2]=0; 83 | 84 | } 85 | 86 | } 87 | 88 | return pixOut; 89 | } 90 | 91 | 92 | // Assumes RGB 93 | // 0.2126*R + 0.7152*G + 0.0722*B 94 | ofPixels imageProc::greyscale(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels) { 95 | 96 | for (int i =0;i= 0 && x * y <= sizeH*sizeW*channels) { 273 | 274 | pixOut[coll+(j*channels)]=pixIn[(y*sizeW*channels)+x*channels]; 275 | pixOut[coll+(j*channels)+1]=pixIn[(y*sizeW*channels)+(x*channels)+1]; 276 | pixOut[coll+(j*channels)+2]=pixIn[(y*sizeW*channels)+(x*channels)+2]; 277 | 278 | } 279 | 280 | } 281 | 282 | } 283 | 284 | return pixOut; 285 | } 286 | 287 | ofPixels imageProc::rotate(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float theta, int anchorX, int anchorY, int offsetX, int offsetY, float zoomX, float zoomY) { 288 | 289 | int totalSize = sizeH*sizeW*channels; 290 | 291 | theta = fmod(theta,TWO_PI); 292 | 293 | for (int i=0;i= 0 && (y*sizeW*channels)+x*channels <= sizeH*sizeW*channels) { 305 | 306 | pixOut[coll+(j*channels)]=pixIn[(y*sizeW*channels)+x*channels]; 307 | pixOut[coll+(j*channels)+1]=pixIn[(y*sizeW*channels)+(x*channels)+1]; 308 | pixOut[coll+(j*channels)+2]=pixIn[(y*sizeW*channels)+(x*channels)+2]; 309 | 310 | } else { 311 | pixOut[coll+(j*channels)]=0; 312 | pixOut[coll+(j*channels)+1]=0; 313 | pixOut[coll+(j*channels)+2]=0; 314 | 315 | } 316 | 317 | } 318 | 319 | } 320 | 321 | return pixOut; 322 | } 323 | 324 | ofPixels 
imageProc::convolve(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float kernel[9]) { 325 | 326 | for (int i = 1; i < sizeH-1; i++) { 327 | 328 | int collm1=(i-1)*sizeW*channels; 329 | 330 | int coll=i*sizeW*channels; 331 | 332 | int collp1=(i+1)*sizeW*channels; 333 | 334 | for (int j = 1; j < sizeW-1; j++) { 335 | 336 | pixOut[coll+(j*3)]=((pixIn[collm1+((j-1)*3)] * kernel[0]) + (pixIn[collm1+((j)*3)] * kernel[1]) + (pixIn[collm1+((j+1)*3)] * kernel[2]) + (pixIn[coll+((j-1)*3)] * kernel[3]) + (pixIn[coll+((j)*3)] * kernel[4]) + (pixIn[coll+((j+1)*3)] * kernel[5]) + (pixIn[collp1+((j-1)*3)] * kernel[6]) + (pixIn[coll+((j)*3)] * kernel[7]) + (pixIn[collp1+((j+1)*3)] * kernel[8])); 337 | 338 | pixOut[coll+(j*3)+1]=((pixIn[collm1+((j-1)*3)+1] * kernel[0]) + (pixIn[collm1+((j)*3)+1] * kernel[1]) + (pixIn[collm1+((j+1)*3)+1] * kernel[2]) + (pixIn[coll+((j-1)*3)+1] * kernel[3]) + (pixIn[coll+((j)*3)+1] * kernel[4]) + (pixIn[coll+((j+1)*3)+1] * kernel[5]) + (pixIn[collp1+((j-1)*3)+1] * kernel[6]) + (pixIn[coll+((j)*3)+1] * kernel[7]) + (pixIn[collp1+((j+1)*3)+1] * kernel[8])); 339 | 340 | pixOut[coll+(j*3)+2]=((pixIn[collm1+((j-1)*3)+2] * kernel[0]) + (pixIn[collm1+((j)*3)+2] * kernel[1]) + (pixIn[collm1+((j+1)*3)+2] * kernel[2]) + (pixIn[coll+((j-1)*3)+2] * kernel[3]) + (pixIn[coll+((j)*3)+2] * kernel[4]) + (pixIn[coll+((j+1)*3)+2] * kernel[5]) + (pixIn[collp1+((j-1)*3)+2] * kernel[6]) + (pixIn[coll+((j)*3)+2] * kernel[7]) + (pixIn[collp1+((j+1)*3)+2] * kernel[8])); 341 | 342 | } 343 | 344 | } 345 | 346 | return pixOut; 347 | 348 | } 349 | 350 | 351 | 352 | 353 | -------------------------------------------------------------------------------- /week2-more-examples/Image-processing/imageProc.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // imageProc.hpp 3 | // emptyExample 4 | // 5 | // Created by Michael Grierson on 13/11/2017. 
6 | // 7 | 8 | #ifndef imageProc_hpp 9 | #define imageProc_hpp 10 | 11 | #include 12 | #include "ofMain.h" 13 | 14 | class imageProc { 15 | 16 | public: 17 | 18 | ofPixels threshRBG(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, int thresh); 19 | 20 | ofPixels iThreshRBG(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, int thresh); 21 | 22 | ofPixels threshGreyscale(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, int thresh); 23 | 24 | ofPixels iThreshGreyscale(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, int thresh); 25 | 26 | ofPixels greyscale(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels); 27 | 28 | ofPixels motionBlur(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float blur); 29 | 30 | ofPixels simpleHighPass(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float blur); 31 | 32 | ofPixels simpleEdgeDetect(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float edge); 33 | 34 | ofPixels resizeX(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float resize); 35 | 36 | ofPixels resizeY(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float resize); 37 | 38 | ofPixels resize(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float resize); 39 | 40 | ofPixels resizeXY(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float resizeX, float resizeY); 41 | 42 | ofPixels resizeXY(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float resizeX, float resizeY, float anchorX, float anchorY); 43 | 44 | ofPixels rotate(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float theta); 45 | 46 | ofPixels rotate(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float theta, int anchorX, int anchorY, int offsetX, int offsetY, float zoomX, float zoomY); 47 | 48 | ofPixels convolve(ofPixels pixIn, ofPixels pixOut, int sizeW, int sizeH, int channels, float kernel[9]); 49 | 50 | float lastVal; 51 | 52 | float lastVals[9]; 53 | 54 | float kernel[9]; 55 | 56 | }; 57 | 58 | 59 | 60 | 61 | #endif /* imageProc_hpp */ 62 | -------------------------------------------------------------------------------- /week2-more-examples/Image-processing/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup(){ 5 | 6 | myImage.load("adele.jpg"); 7 | 8 | 9 | } 10 | 11 | 12 | //-------------------------------------------------------------- 13 | void ofApp::update(){ 14 | 15 | ofPixels pix; 16 | 17 | ofPixels myPixels; 18 | 19 | myPixels.allocate(myImage.getWidth(), myImage.getHeight(),3); 20 | 21 | pix = myImage.getPixels(); 22 | 23 | //for testing convolution 24 | //myPixels = myImage.getPixels(); 25 | 26 | float div = mouseX / 10; 27 | 28 | // myPixels = myImageProc.rotate(pix, myPixels, myImage.getWidth(), myImage.getHeight(), 3, (float )mouseX / 100.0); 29 | 30 | myPixels = myImageProc.rotate(pix, myPixels, myImage.getWidth(), myImage.getHeight(), 3,(float) mouseX/ofGetWidth() * PI * 3, myImage.getWidth()/2,myImage.getHeight()/2,0,0,1,1); 31 | 32 | //box blur 33 | //float myKernel[9]={1.0/9,1.0/9,1.0/9,1.0/9,1.0/9,1.0/9,1.0/9,1.0/9,1.0/9}; 34 | 35 | // motion blur 36 | //float myKernel[9]={0.5/6,0.5/6,1.0/6,0.5/6,0.5/6,1.0/6,0.5/6,0.5/6,1.0/6}; 37 | 38 | // Guassian blur 39 | 
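    // (These nine weights are the standard 3x3 Gaussian approximation
    //  [1 2 1; 2 4 2; 1 2 1] / 16 - they sum to 1, so repeated passes
    //  blur the image without changing its overall brightness.)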
// float myKernel[9]={1.0/16,2.0/16,1.0/16,2.0/16,4.0/16,2.0/16,1.0/16,2.0/16,1.0/16}; 40 | // 41 | // for (int i = 0 ; i < mouseX/2 ; i++) { 42 | // 43 | // myPixels = myImageProc.convolve(myPixels, myPixels, myImage.getWidth(), myImage.getHeight(), 3, myKernel); 44 | // 45 | // } 46 | 47 | myTexture.allocate(myPixels); 48 | 49 | } 50 | 51 | //-------------------------------------------------------------- 52 | void ofApp::draw(){ 53 | 54 | myTexture.draw(0,0); 55 | 56 | } 57 | 58 | //-------------------------------------------------------------- 59 | void ofApp::keyPressed(int key){ 60 | 61 | } 62 | 63 | //-------------------------------------------------------------- 64 | void ofApp::keyReleased(int key){ 65 | 66 | } 67 | 68 | //-------------------------------------------------------------- 69 | void ofApp::mouseMoved(int x, int y){ 70 | 71 | } 72 | 73 | //-------------------------------------------------------------- 74 | void ofApp::mouseDragged(int x, int y, int button){ 75 | 76 | } 77 | 78 | //-------------------------------------------------------------- 79 | void ofApp::mousePressed(int x, int y, int button){ 80 | 81 | } 82 | 83 | //-------------------------------------------------------------- 84 | void ofApp::mouseReleased(int x, int y, int button){ 85 | 86 | } 87 | 88 | //-------------------------------------------------------------- 89 | void ofApp::mouseEntered(int x, int y){ 90 | 91 | } 92 | 93 | //-------------------------------------------------------------- 94 | void ofApp::mouseExited(int x, int y){ 95 | 96 | } 97 | 98 | //-------------------------------------------------------------- 99 | void ofApp::windowResized(int w, int h){ 100 | 101 | } 102 | 103 | //-------------------------------------------------------------- 104 | void ofApp::gotMessage(ofMessage msg){ 105 | 106 | } 107 | 108 | //-------------------------------------------------------------- 109 | void ofApp::dragEvent(ofDragInfo dragInfo){ 110 | 111 | } 112 | -------------------------------------------------------------------------------- /week2-more-examples/Image-processing/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | #include "imageProc.hpp" 5 | 6 | class ofApp : public ofBaseApp{ 7 | public: 8 | void setup(); 9 | void update(); 10 | void draw(); 11 | 12 | void keyPressed(int key); 13 | void keyReleased(int key); 14 | void mouseMoved(int x, int y); 15 | void mouseDragged(int x, int y, int button); 16 | void mousePressed(int x, int y, int button); 17 | void mouseReleased(int x, int y, int button); 18 | void mouseEntered(int x, int y); 19 | void mouseExited(int x, int y); 20 | void windowResized(int w, int h); 21 | void dragEvent(ofDragInfo dragInfo); 22 | void gotMessage(ofMessage msg); 23 | 24 | ofImage myImage; 25 | ofTexture myTexture; 26 | imageProc myImageProc; 27 | }; 28 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/3Dlabs-license.txt: -------------------------------------------------------------------------------- 1 | /************************************************************************ 2 | * * 3 | * Copyright (C) 2002-2005 3Dlabs Inc. Ltd. * 4 | * * 5 | * All rights reserved. 
* 6 | * * 7 | * Redistribution and use in source and binary forms, with or without * 8 | * modification, are permitted provided that the following conditions * 9 | * are met: * 10 | * * 11 | * Redistributions of source code must retain the above copyright * 12 | * notice, this list of conditions and the following disclaimer. * 13 | * * 14 | * Redistributions in binary form must reproduce the above * 15 | * copyright notice, this list of conditions and the following * 16 | * disclaimer in the documentation and/or other materials provided * 17 | * with the distribution. * 18 | * * 19 | * Neither the name of 3Dlabs Inc. Ltd. nor the names of its * 20 | * contributors may be used to endorse or promote products derived * 21 | * from this software without specific prior written permission. * 22 | * * 23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * 24 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * 25 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * 26 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * 27 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * 28 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * 29 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * 30 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * 31 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * 32 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * 33 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * 34 | * POSSIBILITY OF SUCH DAMAGE. * 35 | * * 36 | ************************************************************************/ -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/CarToPol_GLSL.frag: -------------------------------------------------------------------------------- 1 | //setup for 2 texture 2 | varying vec2 texcoord0; 3 | varying vec2 texdim0; 4 | uniform vec2 origin; 5 | uniform vec2 scale; 6 | uniform sampler2DRect tex0; 7 | const float pi=3.1415926; 8 | 9 | 10 | void main() 11 | { 12 | vec2 point = abs(mod((texcoord0/texdim0)*scale+origin,1.));//normalize coordinates 13 | 14 | // cartesian to polar conversion 15 | vec2 dt = 2.*(point-0.5); 16 | float radius = sqrt(dot(dt,dt)); //hypot 17 | float theta = atan(dt.y,dt.x)+pi; 18 | vec2 topol = vec2(radius,theta/(2.*pi))*texdim0; 19 | vec4 a = texture2DRect(tex0,topol); 20 | // output texture 21 | gl_FragColor = a; 22 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/CarToPol_GLSL.vert: -------------------------------------------------------------------------------- 1 | varying vec2 texcoord0; 2 | varying vec2 texcoord1; 3 | varying vec2 texdim0; 4 | varying vec2 texdim1; 5 | varying vec2 texorient0; 6 | varying vec2 texorient1; 7 | 8 | void main() 9 | { 10 | // perform standard transform on vertex 11 | gl_Position = ftransform(); 12 | 13 | // transform texcoords 14 | texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 15 | texcoord1 = vec2(gl_TextureMatrix[1] * gl_MultiTexCoord1); 16 | // extract the x and y scalars from the texture matrix to determine dimensions 17 | texdim0 = vec2 (abs(gl_TextureMatrix[0][0][0]),abs(gl_TextureMatrix[0][1][1])); 18 | texdim1 = vec2 (abs(gl_TextureMatrix[1][0][0]),abs(gl_TextureMatrix[1][1][1])); 19 | // 
extract the sign for orientation 20 | texorient0 = vec2 (gl_TextureMatrix[0][0][0]/texdim0.x,gl_TextureMatrix[0][1][1]/texdim0.y); 21 | texorient1 = vec2 (gl_TextureMatrix[1][0][0]/texdim1.x,gl_TextureMatrix[1][1][1]/texdim1.y); 22 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/ChromaAb_GLSL.frag: -------------------------------------------------------------------------------- 1 | uniform sampler2DRect tex; 2 | uniform vec2 windowSize; 3 | uniform float offsetALL; 4 | 5 | varying vec2 texcoord0; 6 | 7 | 8 | void main() 9 | { 10 | //vec2 offset = ( gl_FragCoord.xy/windowSize - vec2( 0.5 ) ) * 0.005; 11 | vec2 offset = ( gl_FragCoord.xy/windowSize - vec2( 0.5 ) ) * offsetALL; 12 | 13 | float rChannel = texture2DRect( tex, texcoord0 ).r; 14 | float gChannel = texture2DRect( tex, texcoord0 + offset ).g; 15 | float bChannel = texture2DRect( tex, texcoord0 + offset * 2.0 ).b; 16 | vec3 finalCol = vec3( rChannel, gChannel, bChannel ); 17 | 18 | gl_FragColor.rgb = finalCol; 19 | gl_FragColor.a = 1.0; 20 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/ChromaAb_GLSL.vert: -------------------------------------------------------------------------------- 1 | varying vec2 texcoord0; 2 | varying vec2 texcoord1; 3 | varying vec2 texdim0; 4 | varying vec2 texdim1; 5 | varying vec2 texorient0; 6 | varying vec2 texorient1; 7 | 8 | void main() 9 | { 10 | // perform standard transform on vertex 11 | gl_Position = ftransform(); 12 | 13 | // transform texcoords 14 | texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 15 | texcoord1 = vec2(gl_TextureMatrix[1] * gl_MultiTexCoord1); 16 | // extract the x and y scalars from the texture matrix to determine dimensions 17 | texdim0 = vec2 (abs(gl_TextureMatrix[0][0][0]),abs(gl_TextureMatrix[0][1][1])); 18 | texdim1 = vec2 (abs(gl_TextureMatrix[1][0][0]),abs(gl_TextureMatrix[1][1][1])); 19 | // extract the sign for orientation 20 | texorient0 = vec2 (gl_TextureMatrix[0][0][0]/texdim0.x,gl_TextureMatrix[0][1][1]/texdim0.y); 21 | texorient1 = vec2 (gl_TextureMatrix[1][0][0]/texdim1.x,gl_TextureMatrix[1][1][1]/texdim1.y); 22 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Fisheye_GLSL.frag: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | // 3 | // Copyright 2003, ATI Technologies, Inc., All rights reserved. 4 | // 5 | // Permission to use, copy, modify, and distribute this software and its 6 | // documentation for any purpose and without fee is hereby granted, 7 | // provided that the above copyright notice appear in all copies and derivative 8 | // works and that both the copyright notice and this permission notice appear in 9 | // support documentation, and that the name of ATI Technologies, Inc. not be used 10 | // in advertising or publicity pertaining to distribution of the software without 11 | // specific, written prior permission. 
12 | // 13 | /////////////////////////////////////////////////////////////////////////////// 14 | 15 | #define EPSILON 0.000011 16 | 17 | // vertex to fragment shader io 18 | varying vec2 texcoord0; 19 | varying vec2 texdim0; 20 | 21 | // globals 22 | uniform float lensradius; 23 | uniform float signcurvature; 24 | 25 | // samplers 26 | uniform sampler2DRect tex0; 27 | 28 | // entry point 29 | void 30 | main() 31 | { 32 | float curvature = abs(signcurvature); 33 | float extent = lensradius; 34 | float optics = extent / log2(curvature * extent + 1.0) / 1.4427; 35 | vec2 normalizeTD = texcoord0/texdim0; 36 | vec2 PP = normalizeTD - vec2(0.5,0.5); 37 | float P0 = PP[0]; 38 | float P1 = PP[1]; 39 | float radius = sqrt(P0 * P0 + P1 * P1); 40 | 41 | float cosangle = P0 / radius; 42 | float sinangle = P1 / radius; 43 | 44 | float rad1, rad2, newradius; 45 | rad1 = (exp2((radius / optics) * 1.4427) - 1.0) / curvature; 46 | rad2 = optics * log2(1.0 + curvature * radius) / 1.4427; 47 | newradius = signcurvature > 0.0 ? rad1 : rad2; 48 | 49 | vec2 FE = vec2(newradius * cosangle + 0.5,newradius * sinangle + 0.5); 50 | FE = radius <= extent ? FE : normalizeTD; 51 | FE = curvature < EPSILON ? normalizeTD : FE; 52 | 53 | gl_FragColor = texture2DRect(tex0, FE*texdim0); 54 | } 55 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Fisheye_GLSL.vert: -------------------------------------------------------------------------------- 1 | varying vec2 texcoord0; 2 | varying vec2 texcoord1; 3 | varying vec2 texdim0; 4 | varying vec2 texdim1; 5 | varying vec2 texorient0; 6 | varying vec2 texorient1; 7 | 8 | void main() 9 | { 10 | // perform standard transform on vertex 11 | gl_Position = ftransform(); 12 | 13 | // transform texcoords 14 | texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 15 | texcoord1 = vec2(gl_TextureMatrix[1] * gl_MultiTexCoord1); 16 | // extract the x and y scalars from the texture matrix to determine dimensions 17 | texdim0 = vec2 (abs(gl_TextureMatrix[0][0][0]),abs(gl_TextureMatrix[0][1][1])); 18 | texdim1 = vec2 (abs(gl_TextureMatrix[1][0][0]),abs(gl_TextureMatrix[1][1][1])); 19 | // extract the sign for orientation 20 | texorient0 = vec2 (gl_TextureMatrix[0][0][0]/texdim0.x,gl_TextureMatrix[0][1][1]/texdim0.y); 21 | texorient1 = vec2 (gl_TextureMatrix[1][0][0]/texdim1.x,gl_TextureMatrix[1][1][1]/texdim1.y); 22 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Gauss_GLSL.frag: -------------------------------------------------------------------------------- 1 | //Gauss blur 2 | uniform sampler2DRect image; 3 | 4 | varying vec2 texcoordM; 5 | varying vec2 texcoordB0; 6 | varying vec2 texcoordF0; 7 | varying vec2 texcoordB1; 8 | varying vec2 texcoordF1; 9 | varying vec2 texcoordB2; 10 | varying vec2 texcoordF2; 11 | 12 | void main(){ 13 | vec4 sampleM = texture2DRect(image, texcoordM); 14 | vec4 sampleB0 = texture2DRect(image, texcoordB0); 15 | vec4 sampleF0 = texture2DRect(image, texcoordF0); 16 | vec4 sampleB1 = texture2DRect(image, texcoordB1); 17 | vec4 sampleF1 = texture2DRect(image, texcoordF1); 18 | vec4 sampleB2 = texture2DRect(image, texcoordB2); 19 | vec4 sampleF2 = texture2DRect(image, texcoordF2); 20 | 21 | gl_FragColor = 0.1752 * sampleM + 0.1658 * (sampleB0 + sampleF0) + 0.1403 * (sampleB1 + sampleF1) + 0.1063 * (sampleB2 + sampleF2); 22 | } 
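// The seven tap weights above sum to one (0.1752 + 2*(0.1658 + 0.1403 + 0.1063) = 1.0);
// the matching vertex shader (next file) just precomputes the tap coordinates by offsetting
// the texcoord by multiples of the `width` uniform, so `width` sets both the blur direction
// and the step size in pixels (sampler2DRect coordinates). Because a Gaussian is separable,
// you normally run this twice: once horizontally, once vertically. A minimal openFrameworks
// sketch of that two-pass idea, assuming a shader loaded from this frag/vert pair (e.g. the
// blurShader declared in this example's ofApp.h); src, fboA, fboB and blurRadius are named
// here purely for illustration:
//
//   fboA.begin();                                        // horizontal pass: src -> fboA
//   blurShader.begin();
//   blurShader.setUniform2f("width", blurRadius, 0.0);   // step along x
//   src.draw(0, 0);                                      // the drawn texture feeds the "image" sampler
//   blurShader.end();
//   fboA.end();
//
//   fboB.begin();                                        // vertical pass: fboA -> fboB
//   blurShader.begin();
//   blurShader.setUniform2f("width", 0.0, blurRadius);
//   fboA.draw(0, 0);
//   blurShader.end();
//   fboB.end();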
-------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Gauss_GLSL.vert: -------------------------------------------------------------------------------- 1 | uniform vec2 width; 2 | 3 | varying vec2 texcoordM; 4 | varying vec2 texcoordB0; 5 | varying vec2 texcoordF0; 6 | varying vec2 texcoordB1; 7 | varying vec2 texcoordF1; 8 | varying vec2 texcoordB2; 9 | varying vec2 texcoordF2; 10 | 11 | 12 | void main() 13 | { 14 | // perform standard transform on vertex 15 | gl_Position = ftransform(); 16 | 17 | // transform texcoord 18 | vec2 texcoord = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 19 | 20 | // get sample positions 21 | texcoordM = texcoord; 22 | texcoordB0 = texcoord - width; 23 | texcoordF0 = texcoord + width; 24 | texcoordB1 = texcoord - width * 2.0; 25 | texcoordF1 = texcoord + width * 2.0; 26 | texcoordB2 = texcoord - width * 3.0; 27 | texcoordF2 = texcoord + width * 3.0; 28 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/LumaKey_GLSL.frag: -------------------------------------------------------------------------------- 1 | // texcoords 2 | varying vec2 texcoord0; 3 | varying vec2 texcoord1; 4 | 5 | // samplers 6 | uniform sampler2DRect tex0; 7 | uniform sampler2DRect tex1; 8 | 9 | // blend amount 10 | uniform float luma; 11 | uniform float tol; 12 | uniform float fade; 13 | uniform float invert; 14 | uniform float mode; 15 | uniform float binary; 16 | uniform vec4 lumcoeff; 17 | 18 | // entry point 19 | void main() 20 | { 21 | vec4 one = vec4(1.0); 22 | 23 | vec4 a = texture2DRect(tex0, texcoord0); 24 | vec4 b = texture2DRect(tex1, texcoord1); 25 | 26 | // calculate our luminance 27 | float luminance = dot(a,lumcoeff); 28 | 29 | // measure distance from target 30 | float delta = abs(luminance-luma); 31 | 32 | // determine scaling coefficient witin our fade range 33 | float scale = smoothstep(abs(tol),abs(tol)+abs(fade),delta); 34 | 35 | // invert if necessary 36 | float mixamount = mix(scale,1.-scale,invert); 37 | 38 | // blend between sources based on mixamount 39 | vec4 result = mix(b,a,vec4(mixamount)); 40 | 41 | // if not binary just set alpha value 42 | a.a = mixamount; 43 | result = mix(a,result,vec4(binary)); 44 | 45 | // result either blend or mask based on mode 46 | gl_FragColor = mix(result,vec4(mixamount),vec4(mode)); 47 | 48 | // setting the fragment color is a useful means of debugging 49 | //gl_FragColor = vec4(luminance); 50 | } 51 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/LumaKey_GLSL.vert: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Derek Gerstmann - derek@cycling74.com 4 | * Copyright 2005 - Cycling '74 5 | * 6 | * GLSL vertex program for doing a standard vertex transform and 7 | * with texture coordinates. 
8 | * 9 | */ 10 | 11 | varying vec2 texcoord0; 12 | varying vec2 texcoord1; 13 | varying vec2 texcoord2; 14 | varying vec2 texcoord3; 15 | varying vec2 texcoord4; 16 | varying vec2 texcoord5; 17 | varying vec2 texcoord6; 18 | varying vec2 texcoord7; 19 | 20 | void main() 21 | { 22 | // perform standard transform on vertex 23 | gl_Position = ftransform(); 24 | 25 | // transform texcoords 26 | texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 27 | texcoord1 = vec2(gl_TextureMatrix[1] * gl_MultiTexCoord1); 28 | texcoord2 = vec2(gl_TextureMatrix[2] * gl_MultiTexCoord2); 29 | texcoord3 = vec2(gl_TextureMatrix[3] * gl_MultiTexCoord3); 30 | texcoord4 = vec2(gl_TextureMatrix[4] * gl_MultiTexCoord4); 31 | texcoord5 = vec2(gl_TextureMatrix[5] * gl_MultiTexCoord5); 32 | texcoord6 = vec2(gl_TextureMatrix[6] * gl_MultiTexCoord6); 33 | texcoord7 = vec2(gl_TextureMatrix[7] * gl_MultiTexCoord7); 34 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Rota_GLSL.frag: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | 5 | 6 | 7 | */ 8 | 9 | //setup for 2 texture 10 | varying vec2 texcoord0; 11 | varying vec2 texcoord1; 12 | varying vec2 texdim0; 13 | uniform vec2 zoom; 14 | uniform vec2 offset; 15 | uniform float theta; 16 | uniform vec2 anchor; 17 | uniform int boundmode; 18 | uniform sampler2DRect tex0; 19 | uniform sampler2DRect tex1; 20 | const float pi=3.1415926; 21 | 22 | void main() 23 | { 24 | // where is the point? 25 | vec2 sizea = texdim0; 26 | vec2 point = texcoord0; 27 | 28 | //transormation matrices 29 | mat2 sca = mat2 (1./zoom.x,0.,0.,1./zoom.y);//scaling matrix (zoom) 30 | mat2 rot = mat2 (cos(theta),sin(theta),-sin(theta),cos(theta));//rotation matrix 31 | 32 | //perform transform 33 | vec2 no = ((((point-anchor*sizea)*rot)*sca)+anchor*sizea)+offset; 34 | 35 | //create boundmodes 36 | vec2 no2 = mod(no,sizea);//wrap 37 | 38 | vec2 no4 = mix(mod(no,sizea),sizea-mod(no,sizea),floor(mod(no,sizea*2.)/sizea));//folded coords 39 | 40 | // sampler coord 41 | vec2 tc = no*float(boundmode==0) + no*float(boundmode==1) + no2*float(boundmode==2) + no*float(boundmode==3) + no4*float(boundmode==4); 42 | 43 | 44 | //sample textures 45 | vec4 smp0 = texture2DRect(tex0,tc); 46 | vec4 smp1 = texture2DRect(tex1,texcoord0); 47 | 48 | vec2 outbound = sign(floor(no/sizea));//check for point>size 49 | float boundchk = float(sign(float(outbound.x!=0.)+float(outbound.y!=0.))); 50 | float checkm0 = float(boundmode==0)*boundchk; 51 | float checkm1 = float(boundmode==1)*float(boundchk==0.); 52 | vec4 ifb0 = mix(smp0,smp1,checkm0);//ignore 53 | vec4 final = ifb0*float(boundmode != 1) + ifb0*float(checkm1==1.);//clear 54 | 55 | 56 | // output texture 57 | gl_FragColor = final; 58 | } 59 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Rota_GLSL.vert: -------------------------------------------------------------------------------- 1 | varying vec2 texcoord0; 2 | varying vec2 texcoord1; 3 | varying vec2 texdim0; 4 | varying vec2 texdim1; 5 | varying vec2 texorient0; 6 | varying vec2 texorient1; 7 | 8 | void main() 9 | { 10 | // perform standard transform on vertex 11 | gl_Position = ftransform(); 12 | 13 | // transform texcoords 14 | texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 15 | texcoord1 = vec2(gl_TextureMatrix[1] * gl_MultiTexCoord1); 16 | // extract the x and y 
scalars from the texture matrix to determine dimensions 17 | texdim0 = vec2 (abs(gl_TextureMatrix[0][0][0]),abs(gl_TextureMatrix[0][1][1])); 18 | texdim1 = vec2 (abs(gl_TextureMatrix[1][0][0]),abs(gl_TextureMatrix[1][1][1])); 19 | // extract the sign for orientation 20 | texorient0 = vec2 (gl_TextureMatrix[0][0][0]/texdim0.x,gl_TextureMatrix[0][1][1]/texdim0.y); 21 | texorient1 = vec2 (gl_TextureMatrix[1][0][0]/texdim1.x,gl_TextureMatrix[1][1][1]/texdim1.y); 22 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Wobble_GLSL.frag: -------------------------------------------------------------------------------- 1 | // Constants 2 | const float C_PI = 3.1415; 3 | const float C_2PI = 2.0 * C_PI; 4 | const float C_2PI_I = 1.0 / (2.0 * C_PI); 5 | const float C_PI_2 = C_PI / 2.0; 6 | 7 | varying float lightintensity; 8 | 9 | uniform float radius; 10 | uniform vec2 freq; 11 | uniform vec2 amp; 12 | 13 | uniform sampler2DRect image; 14 | 15 | void main (void) 16 | { 17 | vec2 perturb; 18 | float rad; 19 | vec3 color; 20 | 21 | // Compute a perturbation factor for the x-direction 22 | rad = (gl_TexCoord[0].s + gl_TexCoord[0].t - 1.0 + radius) * freq.x; 23 | 24 | // Wrap to -2.0*PI, 2*PI 25 | rad = rad * C_2PI_I; 26 | rad = fract(rad); 27 | rad = rad * C_2PI; 28 | 29 | // Center in -PI, PI 30 | if (rad > C_PI) rad = rad - C_2PI; 31 | if (rad < -C_PI) rad = rad + C_2PI; 32 | 33 | // Center in -PI/2, PI/2 34 | if (rad > C_PI_2) rad = C_PI - rad; 35 | if (rad < -C_PI_2) rad = -C_PI - rad; 36 | 37 | perturb.x = (rad - (rad * rad * rad / 6.0)) * amp.x; 38 | 39 | // Now compute a perturbation factor for the y-direction 40 | rad = (gl_TexCoord[0].s - gl_TexCoord[0].t + radius) * freq.y; 41 | 42 | // Wrap to -2*PI, 2*PI 43 | rad = rad * C_2PI_I; 44 | rad = fract(rad); 45 | rad = rad * C_2PI; 46 | 47 | // Center in -PI, PI 48 | if (rad > C_PI) rad = rad - C_2PI; 49 | if (rad < -C_PI) rad = rad + C_2PI; 50 | 51 | // Center in -PI/2, PI/2 52 | if (rad > C_PI_2) rad = C_PI - rad; 53 | if (rad < -C_PI_2) rad = -C_PI - rad; 54 | 55 | perturb.y = (rad - (rad * rad * rad / 6.0)) * amp.y; 56 | 57 | color = vec3 (texture2DRect(image, perturb + gl_TexCoord[0].st)); 58 | 59 | gl_FragColor = vec4 (color, 1.0); 60 | } 61 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/Wobble_GLSL.vert: -------------------------------------------------------------------------------- 1 | //varying float lightintensity; 2 | uniform vec3 lightpos; 3 | 4 | const float ks = 0.1; 5 | const float kd = 1.0 - ks; 6 | 7 | void main(void) 8 | { 9 | 10 | //vec3 lightpos = (0.0,0.0,0.0); //Unused Used to be applicable, but leaving it in case someone finds it useful. 
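    // Two notes on this shader pair:
    //  - in the fragment shader above, (rad - rad*rad*rad/6.0) is the first two terms of the
    //    Taylor series for sin(rad); after the wrap/fold steps it is a cheap sin() approximation,
    //    and amp scales the resulting texture-coordinate wobble;
    //  - the lighting maths below (lightVec, reflectVec, spec) only feeds the commented-out
    //    lightintensity varying, so as the file stands just the texcoord and position outputs matter.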
11 | vec3 pos = vec3 (gl_ModelViewMatrix * gl_Vertex); 12 | vec3 tnorm = normalize(gl_NormalMatrix * gl_Normal); 13 | vec3 lightVec = normalize(lightpos - pos); 14 | vec3 reflectVec = reflect(-lightVec, tnorm); 15 | vec3 viewVec = normalize(-pos); 16 | 17 | float spec = max(dot(reflectVec, viewVec), 0.0); 18 | spec = pow(spec, 16.0); 19 | 20 | //lightintensity = kd * max(dot(lightVec, tnorm), 0.0)+ ks * spec; 21 | 22 | gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0; 23 | gl_Position = ftransform(); 24 | } 25 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/bloom_GLSL.frag: -------------------------------------------------------------------------------- 1 | //#version 120 2 | //#extension GL_EXT_gpu_shader4: enable 3 | //#extension GL_ARB_texture_rectangle : enable 4 | 5 | // define our varying texture coordinates 6 | varying vec2 texCoord; 7 | uniform sampler2DRect tex0; 8 | uniform float bloom; 9 | 10 | 11 | void main() 12 | { 13 | vec4 sum, dry; 14 | int i, j; 15 | 16 | for( i = -3 ;i <= 3; i++) 17 | { 18 | for (j = -3; j <= 3; j++) 19 | { 20 | sum += texture2DRect(tex0, gl_TexCoord[0].st + vec2(j, i) * 2.0) * 0.25; // 2.0 = "radius step" 21 | } 22 | } 23 | 24 | sum = sum * sum * 0.012; // 0.012 = arbitrary value 25 | dry = texture2DRect(tex0, gl_TexCoord[0].st ); 26 | gl_FragColor = bloom * (1.0 - ((1.0 - sum) * (1.0 - dry))) + ((1.0 - bloom) * dry); // screen mode blending 27 | 28 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/bloom_GLSL.vert: -------------------------------------------------------------------------------- 1 | #version 120 2 | 3 | varying vec2 texCoord; 4 | 5 | void main() 6 | { 7 | texCoord = vec2(gl_MultiTexCoord0); 8 | gl_Position = gl_ProjectionMatrix * gl_ModelViewMatrix * gl_Vertex; 9 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/brcosa_GLSL.frag: -------------------------------------------------------------------------------- 1 | // 2 | // Fragment shader for modifying image contrast by 3 | // interpolation and extrapolation 4 | // 5 | // Author: Randi Rost 6 | // 7 | // Copyright (c) 2002: 3Dlabs, Inc. 8 | // 9 | // See 3Dlabs-License.txt for license information 10 | // 11 | 12 | const vec3 LumCoeff = vec3 (0.2125, 0.7154, 0.0721); 13 | 14 | varying vec2 texcoord; 15 | uniform sampler2DRect image; 16 | 17 | uniform vec3 avgluma; 18 | uniform float saturation; 19 | uniform float contrast; 20 | uniform float brightness; 21 | uniform float alpha; 22 | 23 | void main (void) 24 | { 25 | vec3 texColor = texture2DRect(image, texcoord).rgb; 26 | vec3 intensity = vec3 (dot(texColor, LumCoeff)); 27 | vec3 color = mix(intensity, texColor, saturation); 28 | color = mix(avgluma, color, contrast); 29 | color *= brightness; 30 | gl_FragColor = vec4 (color, color.g*alpha); 31 | } 32 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/brcosa_GLSL.vert: -------------------------------------------------------------------------------- 1 | // 2 | // Vertex shader for modifying image contrast by 3 | // interpolation and extrapolation 4 | // 5 | // Author: Randi Rost 6 | // 7 | // Copyright (c) 2003-2005: 3Dlabs, Inc. 
8 | // 9 | // See 3Dlabs-License.txt for license information 10 | // 11 | 12 | varying vec2 texcoord; 13 | 14 | void main (void) 15 | 16 | { 17 | gl_Position = ftransform(); 18 | texcoord = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0); 19 | } -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/bin/data/shaders/vertexChomAbPassthrough.glsl: -------------------------------------------------------------------------------- 1 | #version 120 2 | 3 | varying vec3 vVertex; 4 | varying vec4 vColor; 5 | void main() 6 | { 7 | vVertex = gl_Vertex.xyz; 8 | vColor = gl_Color; 9 | gl_TexCoord[0] = gl_MultiTexCoord0; 10 | gl_Position = ftransform(); 11 | } 12 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | #include "ofAppGlutWindow.h" 3 | 4 | //-------------------------------------------------------------- 5 | int main(){ 6 | ofAppGlutWindow window; // create a window 7 | // set width, height, mode (OF_WINDOW or OF_FULLSCREEN) 8 | ofSetupOpenGL(&window, 1280, 720, OF_WINDOW); 9 | ofRunApp(new testApp()); // start the app 10 | } 11 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/more-shaders/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class testApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | 11 | void keyPressed(int key); 12 | void keyReleased(int key); 13 | void mouseMoved(int x, int y); 14 | void mouseDragged(int x, int y, int button); 15 | void mousePressed(int x, int y, int button); 16 | void mouseReleased(int x, int y, int button); 17 | void windowResized(int w, int h); 18 | void dragEvent(ofDragInfo dragInfo); 19 | void gotMessage(ofMessage msg); 20 | 21 | ofTexture tex1; 22 | ofTexture tex2; 23 | ofFbo chromaFbo; //For storing the FBO contents of the chromatic abberation shader 24 | ofFbo brcosaFbo; //For storing the FBO contents of brcosa shader 25 | ofShader bloomShader; //Bloom shader (from OF forums) 26 | ofShader blurShader; //So-so Gaussian Blur 27 | ofShader brcosaShader; //Brightness, Contrast, Saturation adjustments 28 | ofShader chromaShader; //Poor mans chromatic abberation 29 | ofShader fishShader; //Fisheye 30 | ofShader rotaShader; //Shader from jit.rota allowing you to zoom, rotate, offset a texture 31 | ofShader carToPolShader; //Cartesian to polar shader 32 | ofShader wobbleShader; //Wobbulation displacement 33 | ofShader lumaShader; //Luma Keying 34 | ofShader radialShader; //RadialBlur 35 | 36 | int camWidth; 37 | int camHeight; 38 | ofVideoGrabber vidGrabber; 39 | }; 40 | 41 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/of_shader_example_150/bin/data/shadersGL3/shader.frag: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | uniform float time; 4 | uniform vec2 mouse; 5 | uniform vec2 resolution; 6 | in vec3 myNormal; 7 | 8 | out vec4 outputColor; 9 | 10 | void main() 11 | { 12 | // light from the top 13 | vec3 light = vec3(0.,10.,5.); 14 | 15 | // Get the normal of the light 16 | // Remember the Unit Vector of the light is the direction of the light 17 | 18 | light = normalize(light); 
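    // Lambertian diffuse: dot(myNormal, light) is the cosine of the angle between the unit
    // normal and the unit light direction; the max(0., ...) below stops faces pointing away
    // from the light from contributing negative light.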
19 | 20 | // return the maximum of either 0, or the squared distance 21 | // This means you can add an ambient light later without 22 | // getting weird values from the output of the dot prod. 23 | float prod = max(0., dot(myNormal,light)); 24 | // use the dot product of the normal and the light 25 | // To calculate the shading for the sphere 26 | outputColor = vec4(prod, prod, prod, 1.0) + vec4(0.2,0.2,0.2,1.0); 27 | } 28 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/of_shader_example_150/bin/data/shadersGL3/shader.vert: -------------------------------------------------------------------------------- 1 | #version 150 2 | 3 | // these are for the programmable pipeline system 4 | uniform mat4 modelViewProjectionMatrix; 5 | // in OF the position attribute is a vec4 6 | in vec4 position; 7 | // this is the normal from the mesh 8 | in vec3 normal; 9 | out vec3 myNormal; 10 | // the time value is passed into the shader by the OF app. 11 | uniform float time; 12 | uniform vec2 mouse; 13 | 14 | float rand(vec3 x) { 15 | // FM noise 16 | return abs(sin(cos(dot(x,vec3(mouse.x,mouse.y,mouse.x)))* 100.)); 17 | } 18 | 19 | void main() 20 | { 21 | float angle = time; 22 | //rotation 23 | mat4 rotateX = mat4(1,0,0,0,0,cos(angle),sin(angle),0,0,-sin(angle),cos(angle),0,0,0,0,1); 24 | 25 | myNormal = normal; 26 | 27 | vec3 newPosition = position.xyz * (1.0 +(0.2 * (rand(position.xyz)))); 28 | 29 | // we need to make the new positions into a vec4 so we can apply the rotation matrix 30 | vec4 rotatedPos = rotateX * vec4(newPosition,1.0); 31 | // we're now ready to generate the new normals after the rotation. 32 | // this is crucial otherwise it will look like our light is also rotating 33 | myNormal = normalize(rotatedPos.xyz); 34 | 35 | gl_Position = modelViewProjectionMatrix * vec4(rotatedPos.xyz,1.); 36 | } 37 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/of_shader_example_150/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "ofApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofGLWindowSettings settings; 8 | settings.setGLVersion(3,2); 9 | ofCreateWindow(settings); 10 | 11 | 12 | // this kicks off the running of my app 13 | // can be OF_WINDOW or OF_FULLSCREEN 14 | // pass in width and height too: 15 | ofRunApp(new ofApp()); 16 | 17 | } 18 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/of_shader_example_150/src/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup(){ 5 | 6 | ofEnableDepthTest(); 7 | 8 | shader.load("shadersGL3/shader"); 9 | 10 | sphere.set(200., 50); 11 | 12 | cam.setNearClip(1); 13 | cam.setFarClip(-100); 14 | cam.setPosition(0,0,1000);} 15 | 16 | //-------------------------------------------------------------- 17 | void ofApp::update(){ 18 | 19 | } 20 | 21 | //-------------------------------------------------------------- 22 | void ofApp::draw(){ 23 | 24 | ofBackground(0, 0, 0); 25 | cam.begin(); 26 | shader.begin(); 27 | 28 | shader.setUniform1f("time", ofGetElapsedTimef()); 29 | shader.setUniform2f("mouse", mouseX, mouseY); 30 | shader.setUniform2f("resolution", ofGetWidth(), ofGetHeight()); 
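    // What these uniforms drive in the shaders loaded in setup():
    //  - "time" sets the rotation angle for the rotateX matrix in shader.vert,
    //  - "mouse" feeds the rand() noise there, which pushes each vertex outward by up to 20%,
    //  - "resolution" is declared in shader.frag but not otherwise used in it.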
31 | 32 | sphere.draw(); 33 | 34 | shader.end(); 35 | cam.end(); 36 | 37 | } 38 | 39 | //-------------------------------------------------------------- 40 | void ofApp::keyPressed(int key){ 41 | 42 | } 43 | 44 | //-------------------------------------------------------------- 45 | void ofApp::keyReleased(int key){ 46 | 47 | } 48 | 49 | //-------------------------------------------------------------- 50 | void ofApp::mouseMoved(int x, int y){ 51 | 52 | } 53 | 54 | //-------------------------------------------------------------- 55 | void ofApp::mouseDragged(int x, int y, int button){ 56 | 57 | } 58 | 59 | //-------------------------------------------------------------- 60 | void ofApp::mousePressed(int x, int y, int button){ 61 | 62 | } 63 | 64 | //-------------------------------------------------------------- 65 | void ofApp::mouseReleased(int x, int y, int button){ 66 | 67 | } 68 | 69 | //-------------------------------------------------------------- 70 | void ofApp::windowResized(int w, int h){ 71 | 72 | } 73 | 74 | //-------------------------------------------------------------- 75 | void ofApp::gotMessage(ofMessage msg){ 76 | 77 | } 78 | 79 | //-------------------------------------------------------------- 80 | void ofApp::dragEvent(ofDragInfo dragInfo){ 81 | 82 | } 83 | -------------------------------------------------------------------------------- /week2-more-examples/Shaders/of_shader_example_150/src/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | 8 | void setup(); 9 | void update(); 10 | void draw(); 11 | 12 | void keyPressed(int key); 13 | void keyReleased(int key); 14 | void mouseMoved(int x, int y); 15 | void mouseDragged(int x, int y, int button); 16 | void mousePressed(int x, int y, int button); 17 | void mouseReleased(int x, int y, int button); 18 | void windowResized(int w, int h); 19 | void dragEvent(ofDragInfo dragInfo); 20 | void gotMessage(ofMessage msg); 21 | 22 | ofEasyCam cam; 23 | ofShader shader; 24 | ofSpherePrimitive sphere; 25 | }; 26 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/bump-map/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "ofApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofSetupOpenGL(1024,768, OF_WINDOW); // <-------- setup the GL context 8 | 9 | // this kicks off the running of my app 10 | // can be OF_WINDOW or OF_FULLSCREEN 11 | // pass in width and height too: 12 | ofRunApp( new ofApp()); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/bump-map/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // This example uses pure openGL (mostly) to create a procedural sphere, and light it by computing the correct surface normals. 
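// How setup() below builds the sphere: with spacing = 2*PI/dim, ring i (i = 0..dim) sits at
// half-angle a = (spacing/2) * i, so
//     z = size * cos(a)   -> the ring's height
//     s = size * sin(a)   -> the ring's radius
// and each ring holds dim points at (cos(spacing*j)*s, sin(spacing*j)*s, z).
// With dim = 40, a runs from 0 to PI, i.e. from one pole of the sphere to the other.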
4 | 5 | //-------------------------------------------------------------- 6 | void ofApp::setup(){ 7 | 8 | ofEnableDepthTest(); 9 | 10 | dim=40; 11 | spacing = ((PI * 2) / dim); 12 | size = 2; 13 | 14 | // This code generates all the vertices we need to make a sphere 15 | for (int i = 0; i < dim + 1; i++) { 16 | 17 | // ******* Calculate the current ring position ******** 18 | 19 | // To calculate the current ring position, we divide our spacing variable in half 20 | // This is because otherwise, the cosine / sine waves will 21 | // generate twice as many numbers as we need (e.g. positive then negative) 22 | // We only need the first half of the wave 23 | 24 | // z is the position of the current ring 25 | float z = size * cos(spacing / 2 * i); 26 | 27 | // This calculates the size of the current ring 28 | float s = size * sin(spacing / 2 * i); 29 | 30 | // For each ring.. 31 | 32 | for (int j = 0; j < dim ; j++) { 33 | 34 | // ...create the next point in the circle at the current size s, at the current depth z 35 | 36 | ofVec3f point; 37 | 38 | point.set(cos(spacing * j) * s,sin(spacing * j) * s,z); 39 | 40 | points.push_back(point); 41 | 42 | } 43 | } 44 | 45 | cout << "done"; 46 | 47 | // this creates noise which we can use as a bump map. 48 | for (int i = 0; i < points.size(); i++) { 49 | 50 | cout << points[i].x << " " << points[i].y << " " << points[i].z << endl; 51 | 52 | } 53 | 54 | 55 | //We are using ofCamera, which sets up a projection matrix like the ones we have built by hand. 56 | //Here we can set the clipping planes 57 | //And also the camera position 58 | //If we wanted we could adjust the FOV and a bunch of other stuff 59 | 60 | 61 | cam.setNearClip(1); 62 | cam.setFarClip(-100); 63 | cam.setPosition(0,0,10); 64 | 65 | // This turns on the lighting 66 | glEnable(GL_LIGHTING); 67 | glEnable(GL_LIGHT0); 68 | 69 | // This generates a bunch of arrays that we can pass in as lighting parameters 70 | GLfloat ambientLight[] = { 0.2f, 0.2f, 0.2f, 1.0f }; 71 | GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8, 1.0f }; 72 | GLfloat specularLight[] = { 0.15f, 0.15f, 0.15f, 1.0f }; 73 | // GLfloat position[] = { -1.5f, 1.0f, -4.0f, 1.0f }; 74 | 75 | // These fuctions define the characteristics of our light, GL_LIGHT0 - remember there are 8 lights 76 | glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); 77 | glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); 78 | glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight); 79 | // glLightfv(GL_LIGHT0, GL_POSITION, position); 80 | 81 | // This turns on color tracking. Might be on by default in openFrameworks 82 | glEnable(GL_COLOR_MATERIAL); 83 | // This sets the material properties. Important for lighting. 84 | // glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); 85 | 86 | //Again, I think this is the default mode in OF. But if you aren't using OF you need to do this manually 87 | 88 | glShadeModel(GL_SMOOTH); // Can also be flat. When flat, the last colour specified for a triangle is the colour of the triangle 89 | 90 | for (int i =0 ; i < points.size() ; i++) { 91 | 92 | ofVec3f noise; 93 | noise.set(ofRandomf(), ofRandomf(),ofRandomf()); 94 | noises.push_back(noise); 95 | 96 | } 97 | 98 | } 99 | 100 | 101 | //-------------------------------------------------------------- 102 | void ofApp::update(){ 103 | 104 | 105 | } 106 | 107 | //-------------------------------------------------------------- 108 | 109 | ofVec3f ofApp::normal(std::vector tri) { 110 | 111 | //Each face in the geometry needs to have a normal vector perpendicular to the face. 
112 | //This is computed by getting the cross product of two points along the surface, a and b. 113 | 114 | ofVec3f a, b, normal; 115 | 116 | // We need to work out what a and b are 117 | // using CCW 118 | 119 | // a 120 | a.x = tri[0].x - tri[1].x; 121 | a.y = tri[0].y - tri[1].y; 122 | a.z = tri[0].z - tri[1].z; 123 | 124 | // b 125 | b.x = tri[1].x - tri[2].x; 126 | b.y = tri[1].y - tri[2].y; 127 | b.z = tri[1].z - tri[2].z; 128 | 129 | // This next bit of code does the cross product 130 | normal.x = (a.y * b.z) - (a.z * b.y); 131 | normal.y = (a.z * b.x) - (a.x * b.z); 132 | normal.z = (a.x * b.y) - (a.y * b.x); 133 | 134 | // now we have to normalise - this is basically the same as getting a unit vector. 135 | // we divide each element of the vector by its magnitude. 136 | // we should do it by hand but to save time I'm just calling a built in function 137 | 138 | return normal.normalize(); 139 | 140 | } 141 | 142 | //-------------------------------------------------------------- 143 | void ofApp::draw(){ 144 | 145 | 146 | ofBackground(0); 147 | 148 | // cam gives us proper openGL world coordinates, like proper openGL 149 | cam.begin(); 150 | 151 | // We're rotating using OF, because OFs projection matrix is a bit screwy if we use the native openGL method. 152 | 153 | ofRotateY(ofGetElapsedTimeMillis()/20); 154 | 155 | // glColor3f(0.0f, 0.0f, 1.0f); // blue 156 | // glBegin(GL_TRIANGLES); 157 | // glVertex3f(-1.0f, 0.0f, 0.0f); 158 | // glVertex3f(0.0f, -1.0f, 0.0f); 159 | // glVertex3f(1.0f, 0.0f, 0.0f); 160 | // glEnd(); 161 | 162 | // We're going to draw some triangles 163 | 164 | glBegin(GL_TRIANGLES); 165 | 166 | // This loop takes a bunch of vertices and correctly draws them as a series of triangles. 167 | 168 | for (int i = dim ; i < points.size(); i++) { 169 | 170 | ofVec3f vec; 171 | 172 | vec.set(points[i].x, points[i].y, points[i].z); 173 | vec = vec.normalize() * noises[i]; 174 | glNormal3f(vec.x, vec.y, vec.z); 175 | glVertex3f(points[i].x, points[i].y, points[i].z); 176 | 177 | 178 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 179 | vec = vec.normalize() * noises[i-1]; 180 | glNormal3f(vec.x, vec.y, vec.z); 181 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 182 | 183 | 184 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 185 | vec = vec.normalize() * noises[i-dim]; 186 | glNormal3f(vec.x, vec.y, vec.z); 187 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 188 | 189 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 190 | vec = vec.normalize() * noises[i-1]; 191 | glNormal3f(vec.x, vec.y, vec.z); 192 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 193 | 194 | if (dim > 1) { 195 | 196 | vec.set(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 197 | vec = vec.normalize() * noises[i-dim-i]; 198 | glNormal3f(vec.x, vec.y, vec.z); 199 | glVertex3f(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 200 | 201 | } else { 202 | 203 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 204 | vec = vec.normalize() * noises[i-dim]; 205 | glNormal3f(vec.x, vec.y, vec.z); 206 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 207 | 208 | } 209 | 210 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 211 | vec = vec.normalize()* noises[i-dim]; 212 | glNormal3f(vec.x, vec.y, vec.z); 213 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 214 | 215 | 216 | } 217 | 218 | glEnd(); 219 | 220 | cam.end(); 221 | } 222 | 223 | 
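// Note on the "bump map" above: the per-vertex noise is multiplied into the normals
// (vec = vec.normalize() * noises[i]), so it roughens the lighting only -- the vertex
// positions stay on the sphere. (The noises[i-dim-i] index in the dim > 1 branch is
// probably meant to be noises[i-dim-1].) A minimal sketch of also displacing the
// geometry, using the same points/noises vectors; the 0.1 scale is just for illustration:
//
//   ofVec3f p = points[i] * (1.0f + 0.1f * noises[i].x);
//   glVertex3f(p.x, p.y, p.z);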
//-------------------------------------------------------------- 224 | void ofApp::keyPressed(int key){ 225 | 226 | } 227 | 228 | //-------------------------------------------------------------- 229 | void ofApp::keyReleased(int key){ 230 | 231 | } 232 | 233 | //-------------------------------------------------------------- 234 | void ofApp::mouseMoved(int x, int y){ 235 | 236 | //move the light using the mouse. 237 | GLfloat position[] = { static_cast((float) ofGetWidth()-mouseX), static_cast((float) mouseY), 1000.0f, 1.0f }; 238 | glLightfv(GL_LIGHT0, GL_POSITION, position); 239 | 240 | } 241 | 242 | //-------------------------------------------------------------- 243 | void ofApp::mouseDragged(int x, int y, int button){ 244 | 245 | } 246 | 247 | //-------------------------------------------------------------- 248 | void ofApp::mousePressed(int x, int y, int button){ 249 | 250 | } 251 | 252 | //-------------------------------------------------------------- 253 | void ofApp::mouseReleased(int x, int y, int button){ 254 | 255 | } 256 | 257 | //-------------------------------------------------------------- 258 | void ofApp::mouseEntered(int x, int y){ 259 | 260 | } 261 | 262 | //-------------------------------------------------------------- 263 | void ofApp::mouseExited(int x, int y){ 264 | 265 | } 266 | 267 | //-------------------------------------------------------------- 268 | void ofApp::windowResized(int w, int h){ 269 | 270 | } 271 | 272 | //-------------------------------------------------------------- 273 | void ofApp::gotMessage(ofMessage msg){ 274 | 275 | } 276 | 277 | //-------------------------------------------------------------- 278 | void ofApp::dragEvent(ofDragInfo dragInfo){ 279 | 280 | } 281 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/bump-map/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | 11 | ofVec3f normal(std::vector tri); 12 | 13 | void keyPressed(int key); 14 | void keyReleased(int key); 15 | void mouseMoved(int x, int y); 16 | void mouseDragged(int x, int y, int button); 17 | void mousePressed(int x, int y, int button); 18 | void mouseReleased(int x, int y, int button); 19 | void mouseEntered(int x, int y); 20 | void mouseExited(int x, int y); 21 | void windowResized(int w, int h); 22 | void dragEvent(ofDragInfo dragInfo); 23 | void gotMessage(ofMessage msg); 24 | 25 | 26 | int dim; 27 | float spacing; 28 | int size; 29 | 30 | ofCamera cam; 31 | 32 | std::vector points; 33 | 34 | std::vector noises; 35 | }; 36 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/mesh/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // This example uses pure openGL (mostly) to create a procedural sphere 4 | 5 | //-------------------------------------------------------------- 6 | void ofApp::setup(){ 7 | 8 | ofEnableDepthTest(); 9 | 10 | dim=20; 11 | spacing = ((PI * 2) / dim); 12 | size = 2; 13 | 14 | // This code generates all the vertices we need to make a sphere 15 | for (int i = 0; i < dim + 1; i++) { 16 | 17 | // ******* Calculate the current ring position ******** 18 | 19 | // To calculate the current ring position, we divide our spacing variable in half 20 | // This is because 
otherwise, the cosine / sine waves will 21 | // generate twice as many numbers as we need (e.g. positive then negative) 22 | // We only need the first half of the wave 23 | 24 | // z is the position of the current ring 25 | float z = size * cos(spacing / 2 * i); 26 | 27 | // This calculates the size of the current ring 28 | float s = size * sin(spacing / 2 * i); 29 | 30 | // For each ring.. 31 | 32 | for (int j = 0; j < dim ; j++) { 33 | 34 | // ...create the next point in the circle at the current size s, at the current depth z 35 | 36 | ofVec3f point; 37 | 38 | point.set(cos(spacing * j) * s,sin(spacing * j) * s,z); 39 | 40 | points.push_back(point); 41 | 42 | } 43 | } 44 | 45 | cout << "done"; 46 | 47 | //We are using ofCamera, which sets up a projection matrix like the ones we have built by hand. 48 | //Here we can set the clipping planes 49 | //And also the camera position 50 | //If we wanted we could adjust the FOV and a bunch of other stuff 51 | 52 | 53 | cam.setNearClip(1); 54 | cam.setFarClip(-100); 55 | cam.setPosition(0,0,10); 56 | 57 | } 58 | 59 | 60 | //-------------------------------------------------------------- 61 | void ofApp::update(){ 62 | 63 | 64 | } 65 | 66 | 67 | //-------------------------------------------------------------- 68 | void ofApp::draw(){ 69 | 70 | 71 | ofBackground(0); 72 | 73 | // cam gives us proper openGL world coordinates, like proper openGL 74 | cam.begin(); 75 | 76 | // We're rotating using OF, because OFs projection matrix is a bit screwy if we use the native openGL method. 77 | 78 | ofRotateY(ofGetElapsedTimeMillis()/20); 79 | 80 | glBegin(GL_LINE_LOOP); 81 | 82 | // you could do this instead 83 | // glBegin(GL_TRIANGLES); 84 | 85 | for (int i = dim ; i < points.size(); i++) { 86 | 87 | ofVec3f vec; 88 | 89 | glVertex3f(points[i].x, points[i].y, points[i].z); 90 | 91 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 92 | 93 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 94 | 95 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 96 | 97 | glVertex3f(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 98 | 99 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 100 | 101 | } 102 | 103 | glEnd(); 104 | 105 | cam.end(); 106 | } 107 | 108 | //-------------------------------------------------------------- 109 | void ofApp::keyPressed(int key){ 110 | 111 | } 112 | 113 | //-------------------------------------------------------------- 114 | void ofApp::keyReleased(int key){ 115 | 116 | } 117 | 118 | //-------------------------------------------------------------- 119 | void ofApp::mouseMoved(int x, int y){ 120 | 121 | } 122 | 123 | //-------------------------------------------------------------- 124 | void ofApp::mouseDragged(int x, int y, int button){ 125 | 126 | } 127 | 128 | //-------------------------------------------------------------- 129 | void ofApp::mousePressed(int x, int y, int button){ 130 | 131 | } 132 | 133 | //-------------------------------------------------------------- 134 | void ofApp::mouseReleased(int x, int y, int button){ 135 | 136 | } 137 | 138 | //-------------------------------------------------------------- 139 | void ofApp::mouseEntered(int x, int y){ 140 | 141 | } 142 | 143 | //-------------------------------------------------------------- 144 | void ofApp::mouseExited(int x, int y){ 145 | 146 | } 147 | 148 | //-------------------------------------------------------------- 149 | void ofApp::windowResized(int w, int h){ 150 | 151 | } 152 | 153 | 
//-------------------------------------------------------------- 154 | void ofApp::gotMessage(ofMessage msg){ 155 | 156 | } 157 | 158 | //-------------------------------------------------------------- 159 | void ofApp::dragEvent(ofDragInfo dragInfo){ 160 | 161 | } 162 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/mesh/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | void keyPressed(int key); 11 | void keyReleased(int key); 12 | void mouseMoved(int x, int y); 13 | void mouseDragged(int x, int y, int button); 14 | void mousePressed(int x, int y, int button); 15 | void mouseReleased(int x, int y, int button); 16 | void mouseEntered(int x, int y); 17 | void mouseExited(int x, int y); 18 | void windowResized(int w, int h); 19 | void dragEvent(ofDragInfo dragInfo); 20 | void gotMessage(ofMessage msg); 21 | 22 | 23 | int dim; 24 | float spacing; 25 | int size; 26 | 27 | ofCamera cam; 28 | 29 | std::vector points; 30 | 31 | }; 32 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/sphereMeshDeform/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | //-------------------------------------------------------------- 4 | void ofApp::setup(){ 5 | 6 | ofSetVerticalSync(true); 7 | ofBackground(0); 8 | 9 | plane.set(200, 200); 10 | 11 | //You can call these in update or draw if you want - they work interactively 12 | 13 | plane.setResolution(100); 14 | plane.setPosition(ofGetWidth()*0.5, ofGetHeight()*0.5, 0); 15 | //plane.rotate(45,1,0,0); 16 | 17 | ofSetSmoothLighting(true); 18 | pointLight.setDiffuseColor( ofFloatColor(1, 1, 1) ); 19 | pointLight.setSpecularColor( ofFloatColor(1, 1, 1)); 20 | pointLight.setAreaLight(500, 500); 21 | 22 | myImage.load("of.png"); 23 | //myImage.getTexture().setTextureWrap( GL_REPEAT, GL_REPEAT ); 24 | 25 | } 26 | 27 | //-------------------------------------------------------------- 28 | void ofApp::update(){ 29 | 30 | plane.rotate(1,0.2,0.5,0); 31 | 32 | 33 | } 34 | 35 | //-------------------------------------------------------------- 36 | void ofApp::draw(){ 37 | 38 | 39 | 40 | //We are going to start by copying the plane's mesh into the deformPlane vboMesh 41 | ofEnableDepthTest(); 42 | 43 | //ofEnableLighting(); 44 | //pointLight.enable(); 45 | 46 | myImage.getTexture().bind(); 47 | plane.mapTexCoordsFromTexture( myImage.getTexture() ); 48 | 49 | deformPlane = plane.getMesh(); 50 | 51 | //We're going to create a 3 dimensional float vector to store each vertex 52 | 53 | ofVec3f vertexValue; 54 | 55 | //Now we are going to run a loop so that we can transform the z value of the deformPlane mesh 56 | 57 | for(int i = 0; i < deformPlane.getNumIndices(); i++ ) { // the loop goes through all the vertexes 58 | 59 | 60 | amp = 0.00001; // updates slowing 61 | 62 | int vertexIndex = deformPlane.getIndex(i); 63 | 64 | vertexValue = deformPlane.getVertex(vertexIndex); // get the current vertex value and store it. 65 | 66 | float er = cos(sin(mouseX*amp*i)); 67 | float ery = cos(cos(mouseY*amp*i)); 68 | 69 | vertexValue.x *= er; // make the vec value equal something else. Anything.. 
70 | vertexValue.y *= ery; 71 | vertexValue.z *= er*ery; 72 | 73 | deformPlane.setVertex( vertexIndex, vertexValue ); // write it all back, including the new z. 74 | 75 | } 76 | 77 | 78 | //plane.draw(); //This draws the original, non-deformed plane mesh 79 | 80 | //This next line gets the current position, rotation, scale etc. of the plane mesh. 81 | //Anything we draw after this will retain these coordinates / transforms. 82 | 83 | plane.transformGL(); 84 | 85 | //Now we draw our newly deformed plane 86 | deformPlane.draw(); 87 | 88 | //If you want, restore the coordinate system so that we can 89 | //draw something else without inheriting the plane's transforms 90 | plane.restoreTransformGL(); 91 | 92 | myImage.getTexture().unbind(); 93 | 94 | ofDisableDepthTest(); 95 | 96 | } 97 | 98 | //-------------------------------------------------------------- 99 | void ofApp::keyPressed(int key){ 100 | 101 | } 102 | 103 | //-------------------------------------------------------------- 104 | void ofApp::keyReleased(int key){ 105 | 106 | } 107 | 108 | //-------------------------------------------------------------- 109 | void ofApp::mouseMoved(int x, int y ){ 110 | 111 | } 112 | 113 | //-------------------------------------------------------------- 114 | void ofApp::mouseDragged(int x, int y, int button){ 115 | 116 | } 117 | 118 | //-------------------------------------------------------------- 119 | void ofApp::mousePressed(int x, int y, int button){ 120 | 121 | } 122 | 123 | //-------------------------------------------------------------- 124 | void ofApp::mouseReleased(int x, int y, int button){ 125 | 126 | } 127 | 128 | //-------------------------------------------------------------- 129 | void ofApp::mouseEntered(int x, int y){ 130 | 131 | } 132 | 133 | //-------------------------------------------------------------- 134 | void ofApp::mouseExited(int x, int y){ 135 | 136 | } 137 | 138 | //-------------------------------------------------------------- 139 | void ofApp::windowResized(int w, int h){ 140 | 141 | } 142 | 143 | //-------------------------------------------------------------- 144 | void ofApp::gotMessage(ofMessage msg){ 145 | 146 | } 147 | 148 | //-------------------------------------------------------------- 149 | void ofApp::dragEvent(ofDragInfo dragInfo){ 150 | 151 | } 152 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/sphereMeshDeform/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | 7 | public: 8 | void setup(); 9 | void update(); 10 | void draw(); 11 | 12 | void keyPressed(int key); 13 | void keyReleased(int key); 14 | void mouseMoved(int x, int y ); 15 | void mouseDragged(int x, int y, int button); 16 | void mousePressed(int x, int y, int button); 17 | void mouseReleased(int x, int y, int button); 18 | void mouseEntered(int x, int y); 19 | void mouseExited(int x, int y); 20 | void windowResized(int w, int h); 21 | void dragEvent(ofDragInfo dragInfo); 22 | void gotMessage(ofMessage msg); 23 | 24 | 25 | // We're going to get a plane and deform it. 
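    // Despite the name, `plane` is an ofSpherePrimitive here; each frame, draw() copies its
    // mesh into deformPlane and warps the copy, so the original primitive is left untouched.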
26 | 27 | ofSpherePrimitive plane; 28 | ofVboMesh deformPlane; 29 | 30 | ofLight pointLight; 31 | 32 | ofImage myImage; 33 | 34 | float amp=0; 35 | 36 | }; 37 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/surface-normal/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "ofApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofSetupOpenGL(1024,768, OF_WINDOW); // <-------- setup the GL context 8 | 9 | // this kicks off the running of my app 10 | // can be OF_WINDOW or OF_FULLSCREEN 11 | // pass in width and height too: 12 | ofRunApp( new ofApp()); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/surface-normal/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // This example uses pure openGL (mostly) to create a procedural sphere, and light it by computing the correct surface normals. 4 | 5 | //-------------------------------------------------------------- 6 | void ofApp::setup(){ 7 | 8 | ofEnableDepthTest(); 9 | 10 | dim=30; 11 | spacing = ((PI * 2) / dim); 12 | size = 2; 13 | 14 | // This code generates all the vertices we need to make a sphere 15 | for (int i = 0; i < dim + 1; i++) { 16 | 17 | // ******* Calculate the current ring position ******** 18 | 19 | // To calculate the current ring position, we divide our spacing variable in half 20 | // This is because otherwise, the cosine / sine waves will 21 | // generate twice as many numbers as we need (e.g. positive then negative) 22 | // We only need the first half of the wave 23 | 24 | // z is the position of the current ring 25 | float z = size * cos(spacing / 2 * i); 26 | 27 | // This calculates the size of the current ring 28 | float s = size * sin(spacing / 2 * i); 29 | 30 | // For each ring.. 31 | 32 | for (int j = 0; j < dim ; j++ ) { 33 | 34 | // ...create the next point in the circle at the current size s, at the current depth z 35 | 36 | ofVec3f point; 37 | 38 | point.set(cos(spacing * j) * s,sin(spacing * j) * s,z); 39 | 40 | points.push_back(point); 41 | 42 | } 43 | } 44 | 45 | cout << "done"; 46 | 47 | // this just prints out all the vertices. 48 | for (int i = 0; i < points.size(); i++) { 49 | 50 | cout << points[i].x << " " << points[i].y << " " << points[i].z << endl; 51 | 52 | } 53 | 54 | 55 | //We are using ofCamera, which sets up a projection matrix like the ones we have built by hand. 
56 | //Here we can set the clipping planes 57 | //And also the camera position 58 | //If we wanted we could adjust the FOV and a bunch of other stuff 59 | 60 | 61 | cam.setNearClip(1); 62 | cam.setFarClip(-100); 63 | cam.setPosition(0,0,10); 64 | 65 | // This turns on the lighting 66 | glEnable(GL_LIGHTING); 67 | glEnable(GL_LIGHT0); 68 | 69 | // This generates a bunch of arrays that we can pass in as lighting parameters 70 | GLfloat ambientLight[] = { 0.2f, 0.2f, 0.2f, 1.0f }; 71 | GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8, 1.0f }; 72 | GLfloat specularLight[] = { 0.15f, 0.15f, 0.15f, 1.0f }; 73 | // GLfloat position[] = { -1.5f, 1.0f, -4.0f, 1.0f }; 74 | 75 | // These fuctions define the characteristics of our light, GL_LIGHT0 - remember there are 8 lights 76 | glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); 77 | glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); 78 | glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight); 79 | // glLightfv(GL_LIGHT0, GL_POSITION, position); 80 | 81 | // This turns on color tracking. Might be on by default in openFrameworks 82 | glEnable(GL_COLOR_MATERIAL); 83 | // This sets the material properties. Important for lighting. 84 | glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); 85 | 86 | //Again, I think this is the default mode in OF. But if you aren't using OF you need to do this manually 87 | 88 | glShadeModel(GL_SMOOTH); // Can also be flat. When flat, the last colour specified for a triangle is the colour of the triangle 89 | 90 | } 91 | 92 | 93 | //-------------------------------------------------------------- 94 | void ofApp::update(){ 95 | 96 | 97 | } 98 | 99 | //-------------------------------------------------------------- 100 | 101 | ofVec3f ofApp::normal(std::vector tri) { 102 | 103 | //Each face in the geometry needs to have a normal vector perpendicular to the face. 104 | //This is computed by getting the cross product of two points along the surface, a and b. 105 | 106 | ofVec3f a, b, normal; 107 | 108 | // We need to work out what a and b are 109 | // using CCW 110 | 111 | // a 112 | a.x = tri[0].x - tri[1].x; 113 | a.y = tri[0].y - tri[1].y; 114 | a.z = tri[0].z - tri[1].z; 115 | 116 | // b 117 | b.x = tri[1].x - tri[2].x; 118 | b.y = tri[1].y - tri[2].y; 119 | b.z = tri[1].z - tri[2].z; 120 | 121 | // This next bit of code does the cross product 122 | normal.x = (a.y * b.z) - (a.z * b.y); 123 | normal.y = (a.z * b.x) - (a.x * b.z); 124 | normal.z = (a.x * b.y) - (a.y * b.x); 125 | 126 | // now we have to normalise - this is basically the same as getting a unit vector. 127 | // we divide each element of the vector by its magnitude. 128 | // we should do it by hand but to save time I'm just calling a built in function 129 | 130 | return normal.normalize(); 131 | 132 | } 133 | 134 | //-------------------------------------------------------------- 135 | void ofApp::draw(){ 136 | 137 | 138 | ofBackground(0); 139 | 140 | // cam gives us proper openGL world coordinates, like proper openGL 141 | cam.begin(); 142 | 143 | // We're rotating using OF, because OFs projection matrix is a bit screwy if we use the native openGL method. 
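    // The triangle loop below stitches each vertex i to its neighbour on the same ring (i-1)
    // and to the matching vertices on the previous ring (i-dim and i-dim-1), so each quad of
    // the sphere becomes two triangles, and each triangle gets its own face normal from
    // ofApp::normal() before its vertices are submitted.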
144 | 145 | ofRotateY(ofGetElapsedTimeMillis()/20); 146 | 147 | // glColor3f(0.0f, 0.0f, 1.0f); // blue 148 | // glBegin(GL_TRIANGLES); 149 | // glVertex3f(-1.0f, 0.0f, 0.0f); 150 | // glVertex3f(0.0f, -1.0f, 0.0f); 151 | // glVertex3f(1.0f, 0.0f, 0.0f); 152 | // glEnd(); 153 | 154 | // We're going to draw some triangles 155 | 156 | glBegin(GL_TRIANGLES); 157 | 158 | // This loop takes a bunch of vertices and correctly draws them as a series of triangles. 159 | 160 | for (int i = dim; i < points.size(); i++) { 161 | 162 | //calculate the normal 163 | 164 | //get the triangle 165 | std::vector<ofVec3f> triangle; 166 | ofVec3f vec; 167 | vec.set(points[i].x, points[i].y, points[i].z); 168 | triangle.push_back(vec); 169 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 170 | triangle.push_back(vec); 171 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 172 | triangle.push_back(vec); 173 | 174 | //Get the normal for this triangle 175 | vec = ofApp::normal(triangle); 176 | 177 | //set the normal. 178 | glNormal3f(vec.x, vec.y, vec.z); 179 | //draw the triangle 180 | glVertex3f(points[i].x, points[i].y, points[i].z); 181 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 182 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 183 | 184 | 185 | // then the next pass of triangle 186 | std::vector<ofVec3f> triangle2; 187 | 188 | if (i > dim) { // when i == dim, points[i-dim-1] would be out of range, so fall back to points[i-dim] 189 | 190 | vec.set(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 191 | triangle2.push_back(vec); 192 | 193 | } else { 194 | 195 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 196 | triangle2.push_back(vec); 197 | 198 | } 199 | 200 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 201 | triangle2.push_back(vec); 202 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 203 | triangle2.push_back(vec); 204 | 205 | vec = ofApp::normal(triangle2); 206 | 207 | glNormal3f(vec.x, vec.y, vec.z); 208 | glVertex3f(triangle2[0].x, triangle2[0].y, triangle2[0].z); // first vertex of the second triangle, chosen safely above 209 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 210 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 211 | 212 | } 213 | 214 | glEnd(); 215 | 216 | cam.end(); 217 | } 218 | 219 | //-------------------------------------------------------------- 220 | void ofApp::keyPressed(int key){ 221 | 222 | } 223 | 224 | //-------------------------------------------------------------- 225 | void ofApp::keyReleased(int key){ 226 | 227 | } 228 | 229 | //-------------------------------------------------------------- 230 | void ofApp::mouseMoved(int x, int y){ 231 | 232 | //move the light using the mouse.
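// (Worth noting: the fourth value in the position array below is the homogeneous w component. With w = 1.0f GL_LIGHT0 behaves as a positional light at that point; with w = 0.0f it would instead be a directional light, e.g. GLfloat direction[] = { 0.0f, 0.0f, 1.0f, 0.0f }; - shown purely as an illustration.)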
233 | GLfloat position[] = { static_cast<float>(ofGetWidth() - mouseX), static_cast<float>(mouseY), 100.0f, 1.0f }; 234 | glLightfv(GL_LIGHT0, GL_POSITION, position); 235 | 236 | } 237 | 238 | //-------------------------------------------------------------- 239 | void ofApp::mouseDragged(int x, int y, int button){ 240 | 241 | } 242 | 243 | //-------------------------------------------------------------- 244 | void ofApp::mousePressed(int x, int y, int button){ 245 | 246 | } 247 | 248 | //-------------------------------------------------------------- 249 | void ofApp::mouseReleased(int x, int y, int button){ 250 | 251 | } 252 | 253 | //-------------------------------------------------------------- 254 | void ofApp::mouseEntered(int x, int y){ 255 | 256 | } 257 | 258 | //-------------------------------------------------------------- 259 | void ofApp::mouseExited(int x, int y){ 260 | 261 | } 262 | 263 | //-------------------------------------------------------------- 264 | void ofApp::windowResized(int w, int h){ 265 | 266 | } 267 | 268 | //-------------------------------------------------------------- 269 | void ofApp::gotMessage(ofMessage msg){ 270 | 271 | } 272 | 273 | //-------------------------------------------------------------- 274 | void ofApp::dragEvent(ofDragInfo dragInfo){ 275 | 276 | } 277 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/surface-normal/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | 11 | ofVec3f normal(std::vector<ofVec3f> tri); 12 | 13 | void keyPressed(int key); 14 | void keyReleased(int key); 15 | void mouseMoved(int x, int y); 16 | void mouseDragged(int x, int y, int button); 17 | void mousePressed(int x, int y, int button); 18 | void mouseReleased(int x, int y, int button); 19 | void mouseEntered(int x, int y); 20 | void mouseExited(int x, int y); 21 | void windowResized(int w, int h); 22 | void dragEvent(ofDragInfo dragInfo); 23 | void gotMessage(ofMessage msg); 24 | 25 | 26 | int dim; 27 | float spacing; 28 | int size; 29 | 30 | ofCamera cam; 31 | 32 | std::vector<ofVec3f> points; 33 | 34 | ofMesh myMesh; 35 | 36 | }; 37 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/triangles/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "ofApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofSetupOpenGL(1024,768, OF_WINDOW); // <-------- setup the GL context 8 | 9 | // this kicks off the running of my app 10 | // can be OF_WINDOW or OF_FULLSCREEN 11 | // pass in width and height too: 12 | ofRunApp( new ofApp()); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/triangles/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // This example uses pure openGL (mostly) to create a procedural sphere 4 | 5 | //-------------------------------------------------------------- 6 | void ofApp::setup(){ 7 | 8 | ofEnableDepthTest(); 9 | 10 | dim=20; 11 | spacing = ((PI * 2) / dim); 12 | size = 2; 13 | 14 | // This code generates all the vertices we need to make a sphere 15 | for (int i = 0; i < dim
+ 1; i++) { 16 | 17 | // ******* Calculate the current ring position ******** 18 | 19 | // To calculate the current ring position, we divide our spacing variable in half 20 | // This is because otherwise, the cosine / sine waves will 21 | // generate twice as many numbers as we need (e.g. positive then negative) 22 | // We only need the first half of the wave 23 | 24 | // z is the position of the current ring 25 | float z = size * cos(spacing / 2 * i); 26 | 27 | // This calculates the size of the current ring 28 | float s = size * sin(spacing / 2 * i); 29 | 30 | // For each ring.. 31 | 32 | for (int j = 0; j < dim ; j++) { 33 | 34 | // ...create the next point in the circle at the current size s, at the current depth z 35 | 36 | ofVec3f point; 37 | 38 | point.set(cos(spacing * j) * s,sin(spacing * j) * s,z); 39 | 40 | points.push_back(point); 41 | 42 | } 43 | } 44 | 45 | cout << "done"; 46 | 47 | //We are using ofCamera, which sets up a projection matrix like the ones we have built by hand. 48 | //Here we can set the clipping planes 49 | //And also the camera position 50 | //If we wanted we could adjust the FOV and a bunch of other stuff 51 | 52 | 53 | cam.setNearClip(1); 54 | cam.setFarClip(-100); 55 | cam.setPosition(0,0,10); 56 | 57 | } 58 | 59 | 60 | //-------------------------------------------------------------- 61 | void ofApp::update(){ 62 | 63 | 64 | } 65 | 66 | 67 | //-------------------------------------------------------------- 68 | void ofApp::draw(){ 69 | 70 | 71 | ofBackground(0); 72 | 73 | // cam gives us proper openGL world coordinates, like proper openGL 74 | cam.begin(); 75 | 76 | // We're rotating using OF, because OFs projection matrix is a bit screwy if we use the native openGL method. 77 | 78 | ofRotateY(ofGetElapsedTimeMillis()/20); 79 | 80 | glBegin(GL_TRIANGLES); 81 | 82 | // you could do this instead 83 | // glBegin(GL_TRIANGLES); 84 | 85 | for (int i = dim ; i < points.size(); i++) { 86 | 87 | ofVec3f vec; 88 | 89 | glVertex3f(points[i].x, points[i].y, points[i].z); 90 | 91 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 92 | 93 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 94 | 95 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 96 | 97 | if (i > dim) { 98 | 99 | glVertex3f(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 100 | 101 | } else { 102 | 103 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 104 | 105 | } 106 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 107 | 108 | } 109 | 110 | glEnd(); 111 | 112 | cam.end(); 113 | } 114 | 115 | //-------------------------------------------------------------- 116 | void ofApp::keyPressed(int key){ 117 | 118 | } 119 | 120 | //-------------------------------------------------------------- 121 | void ofApp::keyReleased(int key){ 122 | 123 | } 124 | 125 | //-------------------------------------------------------------- 126 | void ofApp::mouseMoved(int x, int y){ 127 | 128 | } 129 | 130 | //-------------------------------------------------------------- 131 | void ofApp::mouseDragged(int x, int y, int button){ 132 | 133 | } 134 | 135 | //-------------------------------------------------------------- 136 | void ofApp::mousePressed(int x, int y, int button){ 137 | 138 | } 139 | 140 | //-------------------------------------------------------------- 141 | void ofApp::mouseReleased(int x, int y, int button){ 142 | 143 | } 144 | 145 | //-------------------------------------------------------------- 146 | void ofApp::mouseEntered(int x, int y){ 
147 | 148 | } 149 | 150 | //-------------------------------------------------------------- 151 | void ofApp::mouseExited(int x, int y){ 152 | 153 | } 154 | 155 | //-------------------------------------------------------------- 156 | void ofApp::windowResized(int w, int h){ 157 | 158 | } 159 | 160 | //-------------------------------------------------------------- 161 | void ofApp::gotMessage(ofMessage msg){ 162 | 163 | } 164 | 165 | //-------------------------------------------------------------- 166 | void ofApp::dragEvent(ofDragInfo dragInfo){ 167 | 168 | } 169 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/triangles/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | void keyPressed(int key); 11 | void keyReleased(int key); 12 | void mouseMoved(int x, int y); 13 | void mouseDragged(int x, int y, int button); 14 | void mousePressed(int x, int y, int button); 15 | void mouseReleased(int x, int y, int button); 16 | void mouseEntered(int x, int y); 17 | void mouseExited(int x, int y); 18 | void windowResized(int w, int h); 19 | void dragEvent(ofDragInfo dragInfo); 20 | void gotMessage(ofMessage msg); 21 | 22 | 23 | int dim; 24 | float spacing; 25 | int size; 26 | 27 | ofCamera cam; 28 | 29 | std::vector<ofVec3f> points; 30 | 31 | }; 32 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/vertex-normal/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ofMain.h" 2 | #include "ofApp.h" 3 | 4 | //======================================================================== 5 | int main( ){ 6 | 7 | ofSetupOpenGL(1024,768, OF_WINDOW); // <-------- setup the GL context 8 | 9 | // this kicks off the running of my app 10 | // can be OF_WINDOW or OF_FULLSCREEN 11 | // pass in width and height too: 12 | ofRunApp( new ofApp()); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/vertex-normal/ofApp.cpp: -------------------------------------------------------------------------------- 1 | #include "ofApp.h" 2 | 3 | // This example uses pure openGL (mostly) to create a procedural sphere, and light it by computing per-vertex normals. 4 | 5 | //-------------------------------------------------------------- 6 | void ofApp::setup(){ 7 | 8 | ofEnableDepthTest(); 9 | 10 | dim=20; 11 | spacing = ((PI * 2) / dim); 12 | size = 2; 13 | 14 | // This code generates all the vertices we need to make a sphere 15 | for (int i = 0; i < dim + 1; i++) { 16 | 17 | // ******* Calculate the current ring position ******** 18 | 19 | // To calculate the current ring position, we divide our spacing variable in half 20 | // This is because otherwise, the cosine / sine waves will 21 | // generate twice as many numbers as we need (e.g. positive then negative) 22 | // We only need the first half of the wave 23 | 24 | // z is the position of the current ring 25 | float z = size * cos(spacing / 2 * i); 26 | 27 | // This calculates the size of the current ring 28 | float s = size * sin(spacing / 2 * i); 29 | 30 | // For each ring..
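// (Putting the two loops together: with phi = PI * i / dim and theta = 2 * PI * j / dim, each vertex generated below is (size * sin(phi) * cos(theta), size * sin(phi) * sin(theta), size * cos(phi)) - the standard spherical-coordinate parameterisation of a sphere of radius size.)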
31 | 32 | for (int j = 0; j < dim ; j++) { 33 | 34 | // ...create the next point in the circle at the current size s, at the current depth z 35 | 36 | ofVec3f point; 37 | 38 | point.set(cos(spacing * j) * s,sin(spacing * j) * s,z); 39 | 40 | points.push_back(point); 41 | 42 | } 43 | } 44 | 45 | cout << "done"; 46 | 47 | // this just prints out all the vertices. 48 | for (int i = 0; i < points.size(); i++) { 49 | 50 | cout << points[i].x << " " << points[i].y << " " << points[i].z << endl; 51 | 52 | } 53 | 54 | 55 | //We are using ofCamera, which sets up a projection matrix like the ones we have built by hand. 56 | //Here we can set the clipping planes 57 | //And also the camera position 58 | //If we wanted we could adjust the FOV and a bunch of other stuff 59 | 60 | 61 | cam.setNearClip(1); 62 | cam.setFarClip(-100); 63 | cam.setPosition(0,0,10); 64 | 65 | // This turns on the lighting 66 | glEnable(GL_LIGHTING); 67 | glEnable(GL_LIGHT0); 68 | 69 | // This generates a bunch of arrays that we can pass in as lighting parameters 70 | GLfloat ambientLight[] = { 0.2f, 0.2f, 0.2f, 1.0f }; 71 | GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8f, 1.0f }; 72 | GLfloat specularLight[] = { 0.15f, 0.15f, 0.15f, 1.0f }; 73 | // GLfloat position[] = { -1.5f, 1.0f, -4.0f, 1.0f }; 74 | 75 | // These functions define the characteristics of our light, GL_LIGHT0 - remember there are 8 lights 76 | glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); 77 | glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); 78 | glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight); 79 | // glLightfv(GL_LIGHT0, GL_POSITION, position); 80 | 81 | // This turns on color tracking. Might be on by default in openFrameworks 82 | glEnable(GL_COLOR_MATERIAL); 83 | // This sets the material properties. Important for lighting. 84 | glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); 85 | 86 | //Again, I think this is the default mode in OF. But if you aren't using OF you need to do this manually 87 | 88 | glShadeModel(GL_SMOOTH); // Can also be flat. When flat, the last colour specified for a triangle is the colour of the triangle 89 | 90 | } 91 | 92 | 93 | //-------------------------------------------------------------- 94 | void ofApp::update(){ 95 | 96 | 97 | } 98 | 99 | 100 | //-------------------------------------------------------------- 101 | void ofApp::draw(){ 102 | 103 | 104 | ofBackground(0); 105 | 106 | // cam gives us proper openGL world coordinates 107 | cam.begin(); 108 | 109 | // We're rotating using OF, because OF's projection matrix is a bit screwy if we use the native openGL method. 110 | 111 | ofRotateY(ofGetElapsedTimeMillis()/20); 112 | 113 | // glColor3f(0.0f, 0.0f, 1.0f); // blue 114 | // glBegin(GL_TRIANGLES); 115 | // glVertex3f(-1.0f, 0.0f, 0.0f); 116 | // glVertex3f(0.0f, -1.0f, 0.0f); 117 | // glVertex3f(1.0f, 0.0f, 0.0f); 118 | // glEnd(); 119 | 120 | // We're going to draw some triangles 121 | 122 | glBegin(GL_TRIANGLES); 123 | 124 | // This loop takes a bunch of vertices and correctly draws them as a series of triangles.
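// (Because the sphere is centred on the origin, the outward normal at each vertex is just the vertex position normalised to unit length - which is exactly what the calls to vec.normalize() below compute. Combined with glShadeModel(GL_SMOOTH), these per-vertex normals are interpolated across each triangle, so the sphere shades smoothly instead of showing the faceting you get with one normal per face as in the surface-normal example.)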
125 | 126 | for (int i = dim ; i < points.size(); i++) { 127 | 128 | ofVec3f vec; 129 | 130 | vec.set(points[i].x, points[i].y, points[i].z); 131 | vec = vec.normalize(); 132 | glNormal3f(vec.x, vec.y, vec.z); 133 | glVertex3f(points[i].x, points[i].y, points[i].z); 134 | 135 | 136 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 137 | vec = vec.normalize(); 138 | glNormal3f(vec.x, vec.y, vec.z); 139 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 140 | 141 | 142 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 143 | vec = vec.normalize(); 144 | glNormal3f(vec.x, vec.y, vec.z); 145 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 146 | 147 | vec.set(points[i-1].x, points[i-1].y, points[i-1].z); 148 | vec = vec.normalize(); 149 | glNormal3f(vec.x, vec.y, vec.z); 150 | glVertex3f(points[i-1].x, points[i-1].y, points[i-1].z); 151 | 152 | if (i > dim) { 153 | 154 | vec.set(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 155 | vec = vec.normalize(); 156 | glNormal3f(vec.x, vec.y, vec.z); 157 | glVertex3f(points[i-dim-1].x, points[i-dim-1].y, points[i-dim-1].z); 158 | } else { 159 | 160 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 161 | vec = vec.normalize(); 162 | glNormal3f(vec.x, vec.y, vec.z); 163 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 164 | 165 | } 166 | 167 | 168 | vec.set(points[i-dim].x, points[i-dim].y, points[i-dim].z); 169 | vec = vec.normalize(); 170 | glNormal3f(vec.x, vec.y, vec.z); 171 | glVertex3f(points[i-dim].x, points[i-dim].y, points[i-dim].z); 172 | 173 | 174 | } 175 | 176 | glEnd(); 177 | 178 | cam.end(); 179 | } 180 | 181 | //-------------------------------------------------------------- 182 | void ofApp::keyPressed(int key){ 183 | 184 | } 185 | 186 | //-------------------------------------------------------------- 187 | void ofApp::keyReleased(int key){ 188 | 189 | } 190 | 191 | //-------------------------------------------------------------- 192 | void ofApp::mouseMoved(int x, int y){ 193 | 194 | //move the light using the mouse. 
195 | GLfloat position[] = { static_cast<float>(ofGetWidth() - mouseX), static_cast<float>(mouseY), 1000.0f, 1.0f }; 196 | glLightfv(GL_LIGHT0, GL_POSITION, position); 197 | 198 | } 199 | 200 | //-------------------------------------------------------------- 201 | void ofApp::mouseDragged(int x, int y, int button){ 202 | 203 | } 204 | 205 | //-------------------------------------------------------------- 206 | void ofApp::mousePressed(int x, int y, int button){ 207 | 208 | } 209 | 210 | //-------------------------------------------------------------- 211 | void ofApp::mouseReleased(int x, int y, int button){ 212 | 213 | } 214 | 215 | //-------------------------------------------------------------- 216 | void ofApp::mouseEntered(int x, int y){ 217 | 218 | } 219 | 220 | //-------------------------------------------------------------- 221 | void ofApp::mouseExited(int x, int y){ 222 | 223 | } 224 | 225 | //-------------------------------------------------------------- 226 | void ofApp::windowResized(int w, int h){ 227 | 228 | } 229 | 230 | //-------------------------------------------------------------- 231 | void ofApp::gotMessage(ofMessage msg){ 232 | 233 | } 234 | 235 | //-------------------------------------------------------------- 236 | void ofApp::dragEvent(ofDragInfo dragInfo){ 237 | 238 | } 239 | -------------------------------------------------------------------------------- /week2-more-examples/openGL/vertex-normal/ofApp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "ofMain.h" 4 | 5 | class ofApp : public ofBaseApp{ 6 | public: 7 | void setup(); 8 | void update(); 9 | void draw(); 10 | 11 | void keyPressed(int key); 12 | void keyReleased(int key); 13 | void mouseMoved(int x, int y); 14 | void mouseDragged(int x, int y, int button); 15 | void mousePressed(int x, int y, int button); 16 | void mouseReleased(int x, int y, int button); 17 | void mouseEntered(int x, int y); 18 | void mouseExited(int x, int y); 19 | void windowResized(int w, int h); 20 | void dragEvent(ofDragInfo dragInfo); 21 | void gotMessage(ofMessage msg); 22 | 23 | 24 | int dim; 25 | float spacing; 26 | int size; 27 | 28 | ofCamera cam; 29 | 30 | std::vector<ofVec3f> points; 31 | 32 | std::vector<float> noises; // not used in this example 33 | }; 34 | -------------------------------------------------------------------------------- /week2-more-examples/readme.md: -------------------------------------------------------------------------------- 1 | # What is this? 2 | A bunch of reasonably well documented examples that help you get started with artificial life, fractals, image processing (including transformations and kernels), procedural openGL and shaders. 3 | -------------------------------------------------------------------------------- /week2-slides/C++ memory and pointers-week-2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/week2-slides/C++ memory and pointers-week-2.pdf -------------------------------------------------------------------------------- /week2-slides/pointers_demo_code/main.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include <iostream> 3 | 4 | using namespace std; 5 | 6 | // this variable holds a value 7 | int x = 1000; 8 | 9 | // This is a pointer. It points to the address of some data 10 | int * myPointer = 0; 11 | 12 | // This is a pointer to a block of reserved memory on the HEAP. This is great.
But we need to remember to delete it before the program quits. 13 | int * myInts = new int[100]; 14 | 15 | 16 | int main(int argc, const char * argv[]) { 17 | 18 | for (int i = 0; i < 100 ; i++) { 19 | 20 | myInts[i]=i; 21 | 22 | } 23 | 24 | myPointer = &x; 25 | 26 | cout << "The memory address of x is " << &x << endl; 27 | cout << "The memory addres of myPointer is " << myPointer << endl; 28 | cout << "The data stored at the memory address referenced by myPointer is " << * myPointer << endl; 29 | cout << "The data stored at the 9th index of myInts is " << myInts[9] << endl; 30 | 31 | // Make sure you do this when you are finished with your memory!!! 32 | delete [] myInts; 33 | 34 | return 0; 35 | } 36 | -------------------------------------------------------------------------------- /week2-slides/week-2-C++-common-issues.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ual-cci/MSc-Coding-2/18c293831db1325bb8b87dd63ec77a90c7160e69/week2-slides/week-2-C++-common-issues.pdf -------------------------------------------------------------------------------- /week4-notebooks/Numpy starter.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 9, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "the shape of this array is (100,)\n", 13 | "[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n", 14 | " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n", 15 | " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n", 16 | " 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n", 17 | " 0. 0. 0. 0.]\n", 18 | "the shape of this array is (5, 5, 5)\n", 19 | "[[[0. 0. 0. 0. 0.]\n", 20 | " [0. 0. 0. 0. 0.]\n", 21 | " [0. 0. 0. 0. 0.]\n", 22 | " [0. 0. 0. 0. 0.]\n", 23 | " [0. 0. 0. 0. 0.]]\n", 24 | "\n", 25 | " [[0. 0. 0. 0. 0.]\n", 26 | " [0. 0. 0. 0. 0.]\n", 27 | " [0. 0. 0. 0. 0.]\n", 28 | " [0. 0. 0. 0. 0.]\n", 29 | " [0. 0. 0. 0. 0.]]\n", 30 | "\n", 31 | " [[0. 0. 0. 0. 0.]\n", 32 | " [0. 0. 0. 0. 0.]\n", 33 | " [0. 0. 0. 0. 0.]\n", 34 | " [0. 0. 0. 0. 0.]\n", 35 | " [0. 0. 0. 0. 0.]]\n", 36 | "\n", 37 | " [[0. 0. 0. 0. 0.]\n", 38 | " [0. 0. 0. 0. 0.]\n", 39 | " [0. 0. 0. 0. 0.]\n", 40 | " [0. 0. 0. 0. 0.]\n", 41 | " [0. 0. 0. 0. 0.]]\n", 42 | "\n", 43 | " [[0. 0. 0. 0. 0.]\n", 44 | " [0. 0. 0. 0. 0.]\n", 45 | " [0. 0. 0. 0. 0.]\n", 46 | " [0. 0. 0. 0. 0.]\n", 47 | " [0. 0. 0. 0. 0.]]]\n", 48 | "the shape of this array is (2, 2, 2, 2, 2, 2)\n", 49 | "[[[[[[0. 0.]\n", 50 | " [0. 0.]]\n", 51 | "\n", 52 | " [[0. 0.]\n", 53 | " [0. 0.]]]\n", 54 | "\n", 55 | "\n", 56 | " [[[0. 0.]\n", 57 | " [0. 0.]]\n", 58 | "\n", 59 | " [[0. 0.]\n", 60 | " [0. 0.]]]]\n", 61 | "\n", 62 | "\n", 63 | "\n", 64 | " [[[[0. 0.]\n", 65 | " [0. 0.]]\n", 66 | "\n", 67 | " [[0. 0.]\n", 68 | " [0. 0.]]]\n", 69 | "\n", 70 | "\n", 71 | " [[[0. 0.]\n", 72 | " [0. 0.]]\n", 73 | "\n", 74 | " [[0. 0.]\n", 75 | " [0. 0.]]]]]\n", 76 | "\n", 77 | "\n", 78 | "\n", 79 | "\n", 80 | " [[[[[0. 0.]\n", 81 | " [0. 0.]]\n", 82 | "\n", 83 | " [[0. 0.]\n", 84 | " [0. 0.]]]\n", 85 | "\n", 86 | "\n", 87 | " [[[0. 0.]\n", 88 | " [0. 0.]]\n", 89 | "\n", 90 | " [[0. 0.]\n", 91 | " [0. 0.]]]]\n", 92 | "\n", 93 | "\n", 94 | "\n", 95 | " [[[[0. 0.]\n", 96 | " [0. 0.]]\n", 97 | "\n", 98 | " [[0. 0.]\n", 99 | " [0. 
0.]]]\n", 100 | "\n", 101 | "\n", 102 | " [[[0. 0.]\n", 103 | " [0. 0.]]\n", 104 | "\n", 105 | " [[0. 0.]\n", 106 | " [0. 0.]]]]]]\n", 107 | "[[[0.18753781 0.74943082]\n", 108 | " [0.12240633 0.75597671]]\n", 109 | "\n", 110 | " [[0.62732017 0.54206716]\n", 111 | " [0.42402718 0.54841184]]]\n" 112 | ] 113 | } 114 | ], 115 | "source": [ 116 | "import numpy as np\n", 117 | "\n", 118 | "# creates an empty 1D array with 100 elements\n", 119 | "i = np.zeros([100])\n", 120 | "\n", 121 | "# creates an empty 3D array with 5 * 5 * 5 elements\n", 122 | "x = np.zeros([5,5,5])\n", 123 | "\n", 124 | "# creates a multidimensional array 3 * 2 by 2 blocks \n", 125 | "y = np.zeros([2,2]*3)\n", 126 | "\n", 127 | "j = np.random.random([2,2,2])\n", 128 | "\n", 129 | "print (\"the shape of this array is \", np.shape(i))\n", 130 | "\n", 131 | "print (i)\n", 132 | "\n", 133 | "print (\"the shape of this array is \", np.shape(x))\n", 134 | "\n", 135 | "print (x)\n", 136 | "\n", 137 | "print (\"the shape of this array is \", np.shape(y))\n", 138 | "\n", 139 | "print(y)\n", 140 | "\n", 141 | "print(j)\n", 142 | "\n", 143 | "z = np.arange(100).reshape(2, 5, 10)\n", 144 | "\n", 145 | "#print(z)" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [] 154 | } 155 | ], 156 | "metadata": { 157 | "kernelspec": { 158 | "display_name": "Python 3", 159 | "language": "python", 160 | "name": "python3" 161 | }, 162 | "language_info": { 163 | "codemirror_mode": { 164 | "name": "ipython", 165 | "version": 3 166 | }, 167 | "file_extension": ".py", 168 | "mimetype": "text/x-python", 169 | "name": "python", 170 | "nbconvert_exporter": "python", 171 | "pygments_lexer": "ipython3", 172 | "version": "3.8.5" 173 | } 174 | }, 175 | "nbformat": 4, 176 | "nbformat_minor": 2 177 | } 178 | -------------------------------------------------------------------------------- /week4-notebooks/Untitled.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 3, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import gensim\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from gensim.summarization import summarize\n", 19 | "\n", 20 | "\n", 21 | "\n", 22 | "print (mySummary = summarize(text,word_count=150))" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "kernelspec": { 28 | "display_name": "Python 3", 29 | "language": "python", 30 | "name": "python3" 31 | }, 32 | "language_info": { 33 | "codemirror_mode": { 34 | "name": "ipython", 35 | "version": 3 36 | }, 37 | "file_extension": ".py", 38 | "mimetype": "text/x-python", 39 | "name": "python", 40 | "nbconvert_exporter": "python", 41 | "pygments_lexer": "ipython3", 42 | "version": "3.8.5" 43 | } 44 | }, 45 | "nbformat": 4, 46 | "nbformat_minor": 4 47 | } 48 | -------------------------------------------------------------------------------- /week4-notebooks/lines.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Bokeh Plot 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 |
38 | 39 | 40 | 41 | 42 | 43 | 46 | 82 | 83 | 84 | 85 | --------------------------------------------------------------------------------