229 | ```
230 |
231 |
232 |
233 | For more information:
234 | 1. [Transforming Data Using Pipes](https://angular.io/guide/pipes)
235 | 2. [Angular Pipes](https://dotnettutorials.net/lesson/angular-pipes/)
236 | 3. [Angular 7 Pipes](https://www.javatpoint.com/angular-7-pipes)
237 |
238 |
239 |
240 |
241 |
242 |
--------------------------------------------------------------------------------
/angular/angular-services.md:
--------------------------------------------------------------------------------
1 |
2 | # Angular Services
3 |
4 | Angular services are singleton objects which get instantiated only once during the lifetime of an application. They contain methods that maintain data throughout the life of an application, i.e. the data does not get refreshed and is available all the time. The main objective of a service is to organize and share business logic, models, or data and functions with different components of an Angular application.
5 |
6 | **Benefits**
7 |
8 | An Angular service is a stateless object that provides some very useful functions. These functions can be invoked from any Angular building block, such as components, directives, or other services. This helps in dividing the web application into small, distinct logical units which can be reused.
9 |
10 | ```typescript
11 | import { Injectable } from '@angular/core';
12 | import { HttpClient } from '@angular/common/http';
13 |
14 | @Injectable({ // The Injectable decorator is required for dependency injection to work
15 | // providedIn option registers the service with a specific NgModule
16 | providedIn: 'root', // This declares the service with the root app (AppModule)
17 | })
18 | export class RepoService {
19 |   constructor(private http: HttpClient) { }
20 |
21 | fetchAll() {
22 | return this.http.get('https://api.github.com/repositories');
23 | }
24 | }
25 | ```
26 |
27 | The above service uses Angular's HttpClient as a dependency.
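
A component can then consume the service through constructor injection. A minimal sketch (the component, its template, and the `./repo.service` import path are illustrative):

```typescript
import { Component, OnInit } from '@angular/core';
import { RepoService } from './repo.service'; // assumed location of RepoService

@Component({
  selector: 'app-repo-list',
  template: `<ul><li *ngFor="let repo of repos">{{ repo.name }}</li></ul>`
})
export class RepoListComponent implements OnInit {
  repos: any[] = [];

  // Angular's injector supplies the shared singleton instance
  constructor(private repoService: RepoService) { }

  ngOnInit() {
    this.repoService.fetchAll()
      .subscribe(data => this.repos = data as any[]);
  }
}
```

Because the service is provided in root, every component injecting RepoService receives the same instance.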
28 |
47 | For more information:
48 |
49 | [Introduction to services and dependency injection](https://angular.io/guide/architecture-services)
--------------------------------------------------------------------------------
/angular/images-components/angular-component.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-components/angular-component.png
--------------------------------------------------------------------------------
/angular/images-directives/Attributes-Directives.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-directives/Attributes-Directives.png
--------------------------------------------------------------------------------
/angular/images-directives/difference-between-directives-components.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-directives/difference-between-directives-components.png
--------------------------------------------------------------------------------
/angular/images-directives/dom-manipulation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-directives/dom-manipulation.png
--------------------------------------------------------------------------------
/angular/images-directives/types-of-directives.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-directives/types-of-directives.png
--------------------------------------------------------------------------------
/angular/images-pipes/Types-of-Pipes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-pipes/Types-of-Pipes.png
--------------------------------------------------------------------------------
/angular/images-pipes/anular-pipes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-pipes/anular-pipes.png
--------------------------------------------------------------------------------
/angular/images-pipes/student-table-output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-pipes/student-table-output.png
--------------------------------------------------------------------------------
/angular/images-routing/1.1-spa-single-page-app-home.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/1.1-spa-single-page-app-home.png
--------------------------------------------------------------------------------
/angular/images-routing/1.2-spa-single-page-app-departments.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/1.2-spa-single-page-app-departments.png
--------------------------------------------------------------------------------
/angular/images-routing/1.3-spa-single-page-app-employees.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/1.3-spa-single-page-app-employees.png
--------------------------------------------------------------------------------
/angular/images-routing/1.4-spa-single-page-app-products.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/1.4-spa-single-page-app-products.png
--------------------------------------------------------------------------------
/angular/images-routing/1.5-spa-single-page-app-folder-structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/1.5-spa-single-page-app-folder-structure.png
--------------------------------------------------------------------------------
/angular/images-routing/2.1-wildcard-route.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/2.1-wildcard-route.png
--------------------------------------------------------------------------------
/angular/images-routing/2.2-wildcard-route.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/2.2-wildcard-route.png
--------------------------------------------------------------------------------
/angular/images-routing/3.1-route-parameters-department-list-home.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/3.1-route-parameters-department-list-home.png
--------------------------------------------------------------------------------
/angular/images-routing/3.2-route-parameters-department-clicked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/3.2-route-parameters-department-clicked.png
--------------------------------------------------------------------------------
/angular/images-routing/4.1-route-parameters-previous.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/4.1-route-parameters-previous.png
--------------------------------------------------------------------------------
/angular/images-routing/4.2-route-parameters-next.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/4.2-route-parameters-next.png
--------------------------------------------------------------------------------
/angular/images-routing/4.3-route-parammap-observable-next.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/4.3-route-parammap-observable-next.png
--------------------------------------------------------------------------------
/angular/images-routing/5.1-optional-route-parameters-back.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/5.1-optional-route-parameters-back.png
--------------------------------------------------------------------------------
/angular/images-routing/5.2-optional-route-parameters-selected-list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/5.2-optional-route-parameters-selected-list.png
--------------------------------------------------------------------------------
/angular/images-routing/7.1-department-list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/7.1-department-list.png
--------------------------------------------------------------------------------
/angular/images-routing/7.2-child-route-department-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/7.2-child-route-department-overview.png
--------------------------------------------------------------------------------
/angular/images-routing/7.3-child-route-department-contact.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/7.3-child-route-department-contact.png
--------------------------------------------------------------------------------
/angular/images-routing/7.4-optional-route-param-active-highlight.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/7.4-optional-route-param-active-highlight.png
--------------------------------------------------------------------------------
/angular/images-routing/angular-logo-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/images-routing/angular-logo-1.png
--------------------------------------------------------------------------------
/angular/lifecycle/lifecycle-hooks.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/angular/lifecycle/lifecycle-hooks.webp
--------------------------------------------------------------------------------
/aws/AwsLambda.md:
--------------------------------------------------------------------------------
1 | # AWS Lambda
2 |
3 |
4 | # Introducing AWS Lambda
5 |
6 | Lambda is a simple compute service that runs your code in response to certain events.
7 | These events can be anything, from an upload operation of an object to an S3 bucket, a
8 | record insertion in a DynamoDB table, or even some form of event triggered from your
9 | mobile app. The idea here is simple: you provide your code to AWS Lambda.
10 | Lambda will internally take care of provisioning and managing the underlying
11 | infrastructure resources, making sure your code gets deployed successfully; even things like
12 | your code's scalability and high availability are taken care of by Lambda itself!
13 |
14 | # Key benefits of serverless computing
15 |
16 | ## No hardware or software to manage:
17 |
18 | Perhaps one of the biggest reasons for the hype about
19 | serverless computing is the fact there is absolutely no hardware or software to
20 | manage. The management of the serverless computing environment all the way
21 | from the underlying hardware to the OS, to even the application's platform layer,
22 | is managed by the cloud provider itself.
23 |
24 | ## Faster execution time:
25 | Unlike your standard cloud instances, which generally
26 | take a good minute or two to boot up, functions, on the other hand, spin up very
27 | quickly, mostly in a matter of seconds. This could be due to the fact that the
28 | functions are made to run on top of a containerized platform.
29 |
30 | ## Really low costs:
31 | Since there is virtually no opex involved with serverless
32 | computing, it is fairly cheap, even when compared to hosting and managing
33 | instances in the cloud. Also, the pricing model for serverless computing is a little
34 | different from that of your traditional cloud pricing model. Here, you are
35 | generally billed on the duration of your function's execution and the amount of
36 | memory it consumed during its execution period. The duration is calculated from
37 | the time your code begins executing until it returns or otherwise terminates and
38 | is rounded up to the nearest 100 ms.
39 |
40 | ## Support of popular programming languages:
41 | Most cloud providers that provide
42 | serverless computing frameworks today support a variety of programming
43 | languages, such as Java, Node.js, Python, and even C#. Azure Functions allows
44 | the use of F#, PHP, Bash, Batch, and PowerShell scripts in addition to the ones
45 | mentioned.
46 |
47 | ## Microservices compatible:
48 | Since serverless computing functions are small,
49 | independent chunks of code that are designed to perform a very specific set of
50 | roles or activities, they can be used as a delivery medium for microservices as
51 | well. This comes as a huge advantage as compared to hosting your monolithic
52 | applications on the cloud, which do not scale that effectively.
53 |
54 | ## Event-driven applications:
55 | Serverless functions are an ideal choice for designing
56 | and running event-driven applications that react to certain events and take some
57 | action against them. For example, an image upload operation to a cloud storage
58 | triggers a function that creates associated thumbnail images for the same.
59 |
60 | There are a few cons to serverless computing as well that you should be aware of before we proceed further:
61 |
62 | # Cons or Disadvantage of serverless computing
63 |
64 | ## Execution duration:
65 | Serverless functions are designed to run for short durations
66 | of time, ideally under 300 seconds. This is a hard limit set by
67 | most cloud providers; however, there are a few workarounds to this as well.
68 |
69 | ## Stateless:
70 | Serverless functions are purely stateless, which means that once the function completes its execution or is terminated for some reason, it won't store any data locally on its disk.
71 |
72 | ## Complexity:
73 | The smaller you make things, the more complex it's going to
74 | become. Although writing functions that perform very particular tasks is a good
75 | idea, it can cause complexity issues when you view your application as a whole
76 | system. A simple example: break one large application into ten different
77 | functions, each performing a specific task. Now you need to manage ten
78 | different entities rather than just one. Imagine if you had a thousand functions
79 | instead.
80 |
81 | ## Lack of tools:
82 | Although serverless computing is at the height of its hype, it still doesn't
83 | provide a lot of out-of-the-box tools for management, deployment, and even
84 | monitoring. Most of the monitoring tools you use today were designed for
85 | long-running, complex applications, not for simple functions that execute in
86 | mere seconds.
87 |
88 | ## Vendor lock-in:
89 | With each cloud provider providing its own unique tool sets and
90 | services around serverless computing, you often tend to get tied down to a
91 | particular vendor. This means that you cannot change your cloud provider
92 | without making some changes to your functions as well.
93 |
94 | # Lambda function
95 |
96 |

97 |
98 | Reference : https://aws.amazon.com/lambda/
99 |
100 | AWS supports Java, Python, Node.js, and even C# as programming languages for your functions.
101 | Each function can be invoked either on demand or invoked dynamically based on certain types of supported events.
102 |
103 | A few event examples are listed out as follows:
104 |
105 | **Amazon S3**: Lambda functions can be triggered when an object is created, updated, or deleted in an S3 bucket
106 |
107 | **Amazon DynamoDB**: Lambda functions are triggered when any updates are made to a particular DynamoDB table, such as row insertion, deletion, and so on
108 |
109 | **Amazon Simple Notification Service (SNS)**: Trigger a Lambda function when a message is published on an SNS topic
110 |
111 | **Amazon CloudWatch Logs**: Use Lambda functions to process CloudWatch Logs as feeds
112 |
113 | **Scheduled events**: Run Lambda functions as scheduled events, just like a cron job
114 |
115 | **AWS CodeCommit**: Execute Lambda functions whenever new code is pushed to an existing branch, and so on
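
To make this concrete, here is a minimal sketch of a Java handler (it assumes the aws-lambda-java-core library; the class name and greeting logic are illustrative). Lambda invokes handleRequest with the event payload and a runtime Context:

```java
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;

// Minimal sketch: a Lambda function that receives a String event and returns a String
public class HelloHandler implements RequestHandler<String, String> {
    @Override
    public String handleRequest(String input, Context context) {
        context.getLogger().log("Received event: " + input); // goes to CloudWatch Logs
        return "Hello, " + input;
    }
}
```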
116 |
--------------------------------------------------------------------------------
/aws/images/CloudWatch dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/CloudWatch dashboard.png
--------------------------------------------------------------------------------
/aws/images/CloudWatch-Metrics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/CloudWatch-Metrics.png
--------------------------------------------------------------------------------
/aws/images/Cost and usage report showing monthly costs grouped by service.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/Cost and usage report showing monthly costs grouped by service.png
--------------------------------------------------------------------------------
/aws/images/Cost and usage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/Cost and usage.png
--------------------------------------------------------------------------------
/aws/images/RI Coverage report.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/RI Coverage report.png
--------------------------------------------------------------------------------
/aws/images/RI Utilization report.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/RI Utilization report.png
--------------------------------------------------------------------------------
/aws/images/S3 Storage Classes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/S3 Storage Classes.png
--------------------------------------------------------------------------------
/aws/images/lambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/aws/images/lambda.png
--------------------------------------------------------------------------------
/database/Queries.md:
--------------------------------------------------------------------------------
1 | # Queries
2 |
3 |
4 | # Find Nth highest salary in SQL - Oracle, MSSQL and MySQL
5 |
6 |
7 |
8 | # Nth maximum salary in MySQL using LIMIT keyword
9 |
10 | MySQL supports a LIMIT keyword, which provides pagination capability. You can find the Nth highest salary in MySQL without using a subquery as shown below:
11 |
12 | The benefit of this approach is that it's faster than a correlated-subquery approach, but it's vendor-dependent: this solution will only work in a MySQL database.
13 | ```sql
14 | SELECT salary FROM Employee ORDER BY salary DESC LIMIT N-1, 1
15 |
16 | ```
17 |
18 | ## 2nd highest salary in MySQL without subquery:
19 |
20 |
21 | ```sql
22 | SELECT salary FROM Employee ORDER BY salary DESC LIMIT 1,1
23 |
24 | ```
25 |
26 | ## 3rd highest salary in MySQL using LIMIT clause:
27 |
28 | ```sql
29 | SELECT salary FROM Employee ORDER BY salary DESC LIMIT 2,1
30 |
31 | ```
32 |
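The reference below also covers database-independent and window-function variants. Two commonly cited forms, as sketches (N is a placeholder for the desired rank; note the approaches treat duplicate salaries differently):

## Nth highest salary using a correlated subquery (ANSI SQL: works in Oracle, MSSQL and MySQL):

```sql
SELECT DISTINCT salary
FROM Employee e1
WHERE N - 1 = (SELECT COUNT(DISTINCT e2.salary)
               FROM Employee e2
               WHERE e2.salary > e1.salary)
```

## Nth highest salary using the DENSE_RANK() window function (Oracle, MSSQL):

```sql
SELECT salary
FROM (SELECT salary,
             DENSE_RANK() OVER (ORDER BY salary DESC) AS salary_rank
      FROM Employee) ranked
WHERE salary_rank = N
```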
51 |
52 |
53 |
54 | Ref: https://javarevisited.blogspot.com/2016/01/4-ways-to-find-nth-highest-salary-in.html
55 |
56 |
57 |
--------------------------------------------------------------------------------
/devops/docker/docker.md:
--------------------------------------------------------------------------------
1 | Docker
2 | ====================
--------------------------------------------------------------------------------
/devops/git/git.md:
--------------------------------------------------------------------------------
1 | Git
2 | ====================
3 |
4 | > Git is an open source distributed version control system and source code management (SCM) system designed to handle small and large projects with speed and efficiency.
5 |
6 |
7 | Advantages of git
8 | ------
9 | - Data redundancy and replication is possible
10 | - It is a highly available service
11 | - Each repository has only one `.git` directory
12 | - The network performance and disk utilization are excellent
13 | - It is effortless to collaborate on any project
14 | - You can work on any kind of project within Git
15 |
16 |
17 | git push
18 | --------
19 | `git push` updates remote refs along with associated objects
20 |
21 |
22 | git pull vs git fetch
23 | --------
24 | The `git pull` command pulls new commits from a specific branch of your central repository and updates the target branch in your local repository.
25 |
26 | `git fetch` is also used for the same objective, but it works in a slightly different way. When you perform a git fetch, it pulls all new commits from the desired branch and saves them in a remote-tracking branch in your local repository. If you want these changes reflected in your target branch, git fetch must be followed by a git merge. Your target branch will only be updated after merging the target branch and the fetched branch. To make it simple for you, remember the equation below:
27 |
28 | > git pull = git fetch + git merge
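
Expressed as commands (assuming the remote is named origin and the branch is master):

```
git fetch origin          # download new commits into origin/master
git merge origin/master   # merge them into the currently checked-out branch
```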
29 |
30 | conflict
31 | --------
32 | A 'conflict' arises when the commit that has to be merged has a change in one place, and the current commit also has a change at the same place. Git cannot easily predict which change should take precedence.
33 |
34 | resolve a conflict
35 | ----------
36 | If you need to resolve a conflict in Git, edit the files to fix the conflicting changes, then run `git add` to stage the resolved files, and after that run `git commit` to commit the repaired merge.
37 |
38 | git clone
39 | --------
40 | The git clone command creates a copy of an existing Git repository. 'Cloning' is the simplest way for programmers to get a copy of a central repository.
41 |
42 | git pull origin
43 | --------
44 | A pull is a fetch plus a merge. `git pull origin master` fetches commits from the master branch of the origin remote (into the local origin/master branch), and then merges origin/master into the branch you currently have checked out.
45 |
46 | git commit vs git push
47 | --------
48 | git commit "records changes to the repository" while git push "updates remote refs along with associated objects". So the first one works against your local repository, while the latter one is used to communicate with a remote repository.
49 |
50 |
51 |
52 | For more information:
53 |
54 | 1. [GIT Interview Questions](https://www.javatpoint.com/git-interview-questions)
55 |
56 |
57 |
--------------------------------------------------------------------------------
/devops/jenkins/images/AWS Code Deploy pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/devops/jenkins/images/AWS Code Deploy pipeline.png
--------------------------------------------------------------------------------
/devops/jenkins/images/Continuous Delivery Pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/devops/jenkins/images/Continuous Delivery Pipeline.png
--------------------------------------------------------------------------------
/devops/jenkins/jenkins.md:
--------------------------------------------------------------------------------
1 | Jenkins
2 | ====================
3 |
4 | Continuous delivery
5 | --------
6 |
7 | Continuous delivery is a software engineering practice in which agile teams produce software in continuous, low-risk, short cycles, ensuring that software can be reliably released at any time. This makes it possible to continuously adapt software in line with user feedback and changes in business requirements.
8 |
9 | In this methodology, Development, Test, Support and Operations work together as one delivery team to automate and streamline the build-test-release process as much as possible.
10 |
11 |

12 |
13 | By following the continuous delivery approach, a single commit in the git repository can trigger the automated delivery pipeline, taking your code to production and making it available to customers.
14 |
15 | If you are using Jenkins, AWS, and CodeDeploy, then the pipeline might look like the one below:
16 |
17 |

18 |
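In Jenkins, such a pipeline is typically described in a Jenkinsfile. A minimal declarative sketch (the stage names, Maven commands, and deploy script are illustrative placeholders):

```groovy
pipeline {
    agent any
    stages {
        stage('Build') {
            steps { sh 'mvn -B clean package' } // compile and package the application
        }
        stage('Test') {
            steps { sh 'mvn test' }             // run the automated test suite
        }
        stage('Deploy') {
            steps { sh './deploy.sh' }          // e.g. hand the artifact to AWS CodeDeploy
        }
    }
}
```
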
19 | Reference:
20 | https://www.thoughtworks.com/continuous-delivery
21 | https://en.wikipedia.org/wiki/Continuous_delivery
22 |
--------------------------------------------------------------------------------
/devops/kubernetes/kubernetes.md:
--------------------------------------------------------------------------------
1 | Kubernetes
2 | ====================
3 |
4 |
--------------------------------------------------------------------------------
/ds-algo/Data-Types.md:
--------------------------------------------------------------------------------
1 | # Data Types
2 |
3 | There are two types of data types:
4 | - System-defined data types (also called primitive data types)
5 | - User-defined data types
6 |
7 | ## System-defined data types (Primitive data types)
8 |
9 | Data types that are defined by the system are called primitive data types. The primitive data types provided by many programming languages are: int, float, char, double, bool, etc. The number of bits allocated for each primitive data type depends on the programming language, the compiler and the operating system. For the same primitive data type, different languages may use different sizes. Depending on the size of the data types, the total available values (domain) will also change.
10 |
11 | For example, “int” may take 2 bytes or 4 bytes. If it takes 2 bytes (16 bits), then the total possible values are minus 32,768 to plus 32,767 (-2^15 to 2^15 - 1). If it takes 4 bytes (32 bits), then the possible values are between -2,147,483,648 and +2,147,483,647 (-2^31 to 2^31 - 1). The same is the case with other data types.
12 |
13 | ## User defined data types
14 |
15 | If the system-defined data types are not enough, then most programming languages allow the users to define their own data types, called user-defined data types. Good examples of user-defined data types are structures in C/C++ and classes in Java.
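
For instance, a minimal user-defined type in Java (the class and field names are illustrative):

```java
// A user-defined data type: groups related primitive values under one name
public class Point {
    int x; // x-coordinate
    int y; // y-coordinate

    Point(int x, int y) {
        this.x = x;
        this.y = y;
    }
}
```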
16 |
17 | # Data Structures
18 |
19 | A data structure is a special format for organizing and storing data. General data structure types include arrays, files, linked lists, stacks, queues, trees, graphs and so on.
20 |
21 | Depending on the organization of the elements, data structures are classified into two types:
22 |
23 | ## Linear data structures:
24 |
25 | Elements are accessed in a sequential order but it is not compulsory to store all elements sequentially. Examples: Linked Lists, Stacks and Queues.
26 |
27 | ## Non-linear data structures:
28 |
29 | Elements of this data structure are stored/accessed in a non-linear order. Examples: Trees and graphs.
30 |
31 | # Commonly Used Rates of Growth
32 |
33 | The diagram below shows the relationship between different rates of growth.
34 |
35 |

36 |
37 | Below is the list of growth rates.
38 |
39 |

40 |
41 | # Arrays
42 |
43 | One memory block is allocated for the entire array to hold the elements of the array. The array elements can be accessed in constant time by using the index of the particular element as the subscript.
44 |
45 |

46 |
47 | To access an array element, the address of the element is computed as an offset from the base address of the array: the size of an element of that data type is multiplied by the index of the element, and the result is added to the base address, i.e. address(A[i]) = base + i * sizeof(element).
48 |
49 | This process takes one multiplication and one addition. Since these two operations take constant time, we can say the array access can be performed in constant time.
50 |
51 | ## Advantages of Arrays
52 |
53 | - Simple and easy to use
54 | - Faster access to the elements (constant access)
55 |
56 | ## Disadvantages of Arrays
57 |
58 | Preallocates all needed memory up front and wastes memory space for indices in the array that are empty.
59 |
60 | - Fixed size: The size of the array is static (specify the array size before using it).
61 | - One block allocation: To allocate the array itself at the beginning, sometimes it may not be possible to get the memory for the complete array (if the array size is big).
62 | - Complex position-based insertion: To insert an element at a given position, we may need to shift the existing elements. This will create a position for us to insert the new element at the desired position. If the position at which we want to add an element is at the beginning, then the shifting operation is more expensive.
63 |
64 | ## Dynamic Arrays
65 |
66 | Dynamic array (also called as growable array, resizable array, dynamic table, or array list) is a random access, variable-size list data structure that allows elements to be added or removed.
67 |
68 | One simple way of implementing dynamic arrays is to initially start with some fixed size array. As soon as that array becomes full, create a new array of double the size of the original array and copy the elements over.
69 |
70 | Similarly, reduce the array size to half if the elements in the array are less than half.
71 |
72 | Note: We will see the implementation for dynamic arrays in the Stacks, Queues and Hashing chapters.
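
As a quick illustration of the doubling strategy, a minimal sketch in Java (int elements only; shrinking is omitted for brevity):

```java
import java.util.Arrays;

public class DynamicArray {
    private int[] data = new int[2]; // start with a small fixed-size array
    private int size = 0;

    public void add(int value) {
        if (size == data.length) {
            // the array is full: allocate a new array of double the size and copy
            data = Arrays.copyOf(data, data.length * 2);
        }
        data[size++] = value;
    }

    public int get(int index) {
        if (index < 0 || index >= size) {
            throw new IndexOutOfBoundsException("index: " + index);
        }
        return data[index];
    }

    public int size() {
        return size;
    }
}
```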
73 |
74 | # Linked List
75 |
76 | A linked list is a data structure used for storing collections of data. A linked list has the following properties.
77 |
78 | - Successive elements are connected by pointers
79 | - The last element points to NULL
80 | - Can grow or shrink in size during execution of a program
81 | - Can be made just as long as required (until systems memory exhausts)
82 | - Does not waste memory space (but takes some extra memory for pointers). It allocates memory as list grows.
83 |
84 |

85 |
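A node of a singly linked list can be sketched in Java as follows (illustrative):

```java
// One node of a singly linked list
class ListNode {
    int data;       // the element stored in this node
    ListNode next;  // reference to the successor; null for the last node

    ListNode(int data) {
        this.data = data;
    }
}
```
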
86 | ## Advantages of Linked Lists
87 |
88 | The advantage of linked lists is that they can be expanded in constant time. To create an array, we must allocate memory for a certain number of elements. To add more elements to the array when full, we must create a new array and copy the old array into the new array. This can take a lot of time.
89 |
90 | We can prevent this by allocating lots of space initially but then we might allocate more than we need and waste memory. With a linked list, we can start with space for just one allocated element and add on new elements easily without the need to do any copying and reallocating.
91 |
92 | ## Disadvantages of Linked Lists
93 |
94 | There are a number of issues with linked lists. The main disadvantage of linked lists is the access time to individual elements. An array is random-access, which means it takes O(1) to access any element in the array. Linked lists take O(n) in the worst case to access an element. Another advantage of arrays is spatial locality in memory: arrays are defined as contiguous blocks of memory, so any array element is physically near its neighbors. This greatly benefits from modern CPU caching methods.
95 |
96 | Although the dynamic allocation of storage is a great advantage, the overhead with storing and retrieving data can make a big difference. Sometimes linked lists are hard to manipulate. If the last item is deleted, the last but one must then have its pointer changed to hold a NULL reference. This requires that the list is traversed to find the last but one link, and its pointer set to a NULL reference.
97 |
98 | Finally, linked lists waste memory in terms of the extra references stored per node.
99 |
100 | ## Comparison of Linked Lists with Arrays & Dynamic Arrays
101 |
102 |

103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
--------------------------------------------------------------------------------
/ds-algo/Problems.md:
--------------------------------------------------------------------------------
1 |
2 | # Anagram
3 |
4 | Given two strings s and t, write a function to determine whether t is an anagram of s.
5 |
6 |
7 | Example 1:
8 | ```log
9 | Input: s = "anagram", t = "nagaram"
10 | Output: true
11 | ```
12 |
13 | Example 2:
14 | ```log
15 | Input: s = "rat", t = "car"
16 | Output: false
17 | ```
18 |
19 | You can assume that the strings contain only lowercase letters.
20 |
21 | **Advanced:**
22 |
23 | What if the input string contains unicode characters? Can you adjust your solution to deal with this situation?
24 |
25 | ## **Problem analysis**
26 |
27 | If two strings are anagrams of each other, then the number and types of characters in the two strings are the same; the only difference is the position of each character. The easiest way is to sort both strings according to the same rule and then compare them. This method saves space, but because it involves sorting, the time complexity is O(n log n).
28 |
29 | There is also a method similar to counting sort: count the number of occurrences of each character in one string, then walk the other string decrementing the counts. This reduces the time complexity to O(n). If the strings contain only lowercase letters, we can use an array of length 26, so no more than constant extra space is needed. But if the input contains Unicode characters, the character set is too large for a constant-size array, so a structure like a hash table is a better fit for the counts. The logic is the same as before, but the space complexity is no longer O(1); it becomes O(n).
30 |
31 | ## Code implementation (sorting)
32 | ```java
33 | //Time Complexity O(n log n)
34 | public boolean isAnagram(String s, String t) {
35 | if ((s == null) || (t == null) || (t.length() != s.length())) {
36 | return false;
37 | }
38 | char[] sArr1 = s.toCharArray();
39 | char[] sArr2 = t.toCharArray();
40 | Arrays.sort(sArr1);
41 | Arrays.sort(sArr2);
42 | return Arrays.equals(sArr1, sArr2);
43 | }
44 |
45 | ```
46 |
47 | ## Code implementation (hash)
48 | ```java
49 | //Time Complexity O(n)
50 | //Space Complexity O(n)
51 | public boolean isAnagram(String s, String t) {
52 | if ((s == null) || (t == null) || (t.length() != s.length())) {
53 | return false;
54 | }
55 |
56 | int n = s.length();
57 |
58 |         Map<Character, Integer> counts = new HashMap<>();
59 |
60 | for (int i = 0; i < n; ++i) {
61 | counts.put(s.charAt(i), counts.getOrDefault(s.charAt(i), 0) + 1);
62 | }
63 |
64 | for (int i = 0; i < n; ++i) {
65 | counts.put(t.charAt(i), counts.getOrDefault(t.charAt(i), 0) - 1);
66 | if (counts.getOrDefault(t.charAt(i), -1) < 0) {
67 | return false;
68 | }
69 | }
70 |
71 | return true;
72 | }
73 |
74 | ```
75 |
76 | # Find the median of two sorted arrays
77 |
78 | Given two sorted (ascending) arrays nums1 and nums2 of size m and n, find the median of the two arrays. The required time complexity of the algorithm is O(log(m + n)). You can assume that nums1 and nums2 will not both be empty.
79 |
80 | ## What is the median of an array
81 | The median is found by ordering all elements and picking out the one in the middle (or, if there are two middle elements, taking the mean of those two).
82 |
83 | ```log
84 | Example 1 :
85 | nums1 = [ 1 , 3 ]
86 | nums2 = [2]
87 |
88 | The median is 2.0
89 |
90 | Example 2 :
91 | nums1 = [ 1 , 2 ]
92 | nums2 = [3, 4]
93 |
94 | The median is ( 2 + 3 ) / 2 = 2.5
95 |
96 | ```
97 |
98 | ## Problem analysis
99 |
Binary-search for a partition of the smaller array (and the matching partition of the larger one) such that the two left halves together hold half of all elements and every element on the left is less than or equal to every element on the right (maxLeftX <= minRightY and maxLeftY <= minRightX). Once such a partition is found, the median comes from the border elements: for an even combined length it is the average of max(left) and min(right); for an odd combined length it is max(left). Since we binary-search only the smaller array, the time is O(log(min(m, n))), which satisfies the O(log(m + n)) requirement.

100 | ## Code
101 |
102 | ```java
103 | /* Time complexity is O(log(min(x, y)))
104 | * Space complexity is O(1)
105 | */
106 | public class MedianOfTwoSortedArrayOfDifferentLength {
107 |
108 | public double findMedianSortedArrays(int input1[], int input2[]) {
109 | //if input1 length is greater than switch them so that input1 is smaller than input2.
110 | if (input1.length > input2.length) {
111 | return findMedianSortedArrays(input2, input1);
112 | }
113 | int x = input1.length;
114 | int y = input2.length;
115 |
116 | int low = 0;
117 | int high = x;
118 | while (low <= high) {
119 | int partitionX = (low + high)/2;
120 | int partitionY = (x + y + 1)/2 - partitionX;
121 |
122 | //if partitionX is 0 it means nothing is there on left side. Use -INF for maxLeftX
123 | //if partitionX is length of input then there is nothing on right side. Use +INF for minRightX
124 | int maxLeftX = (partitionX == 0) ? Integer.MIN_VALUE : input1[partitionX - 1];
125 | int minRightX = (partitionX == x) ? Integer.MAX_VALUE : input1[partitionX];
126 |
127 | int maxLeftY = (partitionY == 0) ? Integer.MIN_VALUE : input2[partitionY - 1];
128 | int minRightY = (partitionY == y) ? Integer.MAX_VALUE : input2[partitionY];
129 |
130 | if (maxLeftX <= minRightY && maxLeftY <= minRightX) {
131 | //We have partitioned array at correct place
132 | // Now get max of left elements and min of right elements to get the median in case of even length combined array size
133 | // or get max of left for odd length combined array size.
134 | if ((x + y) % 2 == 0) {
135 | return ((double)Math.max(maxLeftX, maxLeftY) + Math.min(minRightX, minRightY))/2;
136 | } else {
137 | return (double)Math.max(maxLeftX, maxLeftY);
138 | }
139 | } else if (maxLeftX > minRightY) { //we are too far on right side for partitionX. Go on left side.
140 | high = partitionX - 1;
141 | } else { //we are too far on left side for partitionX. Go on right side.
142 | low = partitionX + 1;
143 | }
144 | }
145 |
146 |         // The only way we can get here is if the input arrays were not sorted. Throw in that scenario.
147 | throw new IllegalArgumentException();
148 | }
149 |
150 | public static void main(String[] args) {
151 | int[] x = {1, 3, 8, 9, 15};
152 |         int[] y = {7, 11, 18, 19, 21, 25};
153 |
154 | MedianOfTwoSortedArrayOfDifferentLength mm = new MedianOfTwoSortedArrayOfDifferentLength();
155 |         System.out.println(mm.findMedianSortedArrays(x, y)); // 11.0
156 | }
157 | }
158 |
159 | ```
160 |
161 | ## Code (Brute Force approach)
162 |
163 | ```java
164 | public static double findMedianSortedArraysBruteForceApproach(int[] firstArray, int[] secondArray) {
165 | int firstArrayLength = firstArray.length;
166 | int secondArrayLength = secondArray.length;
167 |
168 | int[] combinedArray = new int[firstArrayLength + secondArrayLength];
169 | int k = 0;
170 | for (int i : firstArray) {
171 | combinedArray[k++] = i;
172 | }
173 |
174 | for (int i : secondArray) {
175 | combinedArray[k++] = i;
176 | }
177 |
178 | Arrays.sort(combinedArray);
179 | double median;
180 |         if (combinedArray.length % 2 == 1) { // odd total: single middle element
181 |             median = combinedArray[combinedArray.length / 2];
182 |         } else { // even total: average the two middle elements
183 |             median = (combinedArray[combinedArray.length / 2 - 1]
184 |                     + combinedArray[combinedArray.length / 2]) / 2.0;
185 |         }
186 |
187 | return median;
188 |
189 | }
190 | ```
191 |
192 | Ref :
193 | - https://github.com/sunilsoni/interview-notes-code/blob/master/src/main/java/com/interview/notes/code/LeetCode/MedianOfTwoSortedArrayOfDifferentLength.java
194 | - https://github.com/MisterBooo/LeetCodeAnimation/blob/master/0004-median-of-two-sorted-arrays/Article/0004-median-of-two-sorted-arrays.md
195 |
196 |
197 |
198 | # Subarray Sum Equals K
199 |
200 | Given an array of integers nums and an integer k, return the total number of continuous subarrays whose sum equals k.
201 |
202 |
203 | ## Problem analysis
204 |
205 | ```log
206 | Example 1:
207 |
208 | Input: nums = [1,1,1], k = 2
209 | Output: 2
210 |
211 | ```
212 | ```log
213 | Example 2:
214 |
215 | Input: nums = [1,2,3], k = 3
216 | Output: 2
217 | ```
218 |
The idea: keep a running prefix sum and a hashmap from each prefix-sum value to the number of times it has occurred so far. A subarray ending at index i sums to k exactly when some earlier prefix sum equals sum - k, so at each index we add map[sum - k] to the count; map.put(0, 1) accounts for subarrays that start at index 0.

219 | ## Code - Optimization by Hashmap
220 | Complexity
221 | Time complexity : O(n).
222 | Space complexity : O(n).
223 |
224 | ```java
225 | public int subarraySumOptimizationHashmap(int[] nums, int k) {
226 | int count = 0, sum = 0;
227 |         HashMap<Integer, Integer> map = new HashMap<>();
228 | map.put(0, 1);
229 |
230 | for (int i = 0; i < nums.length; i++) {
231 | sum += nums[i];
232 | if (map.containsKey(sum - k))
233 | count += map.get(sum - k);
234 | map.put(sum, map.getOrDefault(sum, 0) + 1);
235 | }
236 | return count;
237 | }
238 |
239 | ```
240 |
241 |
242 |
243 |
244 | ## Code - Brute Force approach
245 |
246 | Complexity
247 | Time complexity : O(n^2).
248 | Space complexity : O(n).
249 |
250 | ```java
251 | public int subarraySum(int[] nums, int k) {
252 | int count = 0;
253 |
254 | int[] sum = new int[nums.length + 1];
255 | sum[0] = 0;
256 | for (int i = 1; i <= nums.length; i++)
257 | sum[i] = sum[i - 1] + nums[i - 1];
258 |
259 | for (int start = 0; start < sum.length; start++) {
260 | for (int end = start + 1; end < sum.length; end++) {
261 | if (sum[end] - sum[start] == k)
262 | count++;
263 | }
264 | }
265 |
266 | return count;
267 | }
268 |
269 | ```
270 |
271 |
272 |
273 | Ref : https://leetcode.com/problems/subarray-sum-equals-k/discuss/803317/Java-Solution-with-Detailed-Explanation
274 |
275 |
276 |
--------------------------------------------------------------------------------
/ds-algo/Sorting-algorithms.md:
--------------------------------------------------------------------------------
1 | # Sorting algorithm
2 |
3 |
4 | https://en.wikipedia.org/wiki/Sorting_algorithm
5 | https://github.com/diptangsu/Sorting-Algorithms
6 | https://github.com/TheAlgorithms/Java
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/ds-algo/Sorting.md:
--------------------------------------------------------------------------------
1 | # Sorting
2 |
3 | Sorting is an algorithm that arranges the elements of a list in a certain order [either ascending or descending]. The output is a permutation or reordering of the input.
4 |
5 | Sorting is one of the important categories of algorithms in computer science and a lot of research has gone into this category. Sorting can significantly reduce the complexity of a problem, and is often used for database algorithms and searches.
6 |
7 | ## Bubble Sort
8 |
9 | Bubble sort is the simplest sorting algorithm. It works by iterating the input array from the first element to the last, comparing each pair of elements and swapping them if needed. Bubble sort continues its iterations until no more swaps are needed.
10 |
11 | The algorithm gets its name from the way smaller elements “`bubble`” to the top of the list. Generally, insertion sort has better performance than bubble sort.
12 |
13 | The only significant advantage that bubble sort has over other implementations is that it can detect whether the input list is already sorted or not.
14 |
15 | ### Implementation
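
A minimal sketch in Java of the improved version, which stops early once a pass makes no swaps (this is what gives the O(n) best case in the table below):

```java
public static void bubbleSort(int[] arr) {
    for (int pass = arr.length - 1; pass > 0; pass--) {
        boolean swapped = false;
        for (int i = 0; i < pass; i++) {
            if (arr[i] > arr[i + 1]) { // adjacent pair out of order: swap
                int tmp = arr[i];
                arr[i] = arr[i + 1];
                arr[i + 1] = tmp;
                swapped = true;
            }
        }
        if (!swapped) break; // no swaps in this pass: array already sorted
    }
}
```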
16 |
17 | ### Performance
18 |
19 |
20 | | Type | Complexity |
21 | | ----------------------------------------- | ---------------- |
22 | | Worst case complexity                      | O(n^2)           |
23 | | Best case complexity (Improved version)    | O(n)             |
24 | | Average case complexity (Basic version)    | O(n^2)           |
25 | | Worst case space complexity | O(1) auxiliary |
26 |
27 | ## Selection Sort
28 |
29 | Selection sort is an in-place sorting algorithm. Selection sort works well for small files. It is used for sorting files with very large values and small keys. This is because selection is made based on keys, and swaps are made only when required.
30 |
31 | ### Advantages
32 | - Easy to implement
33 | - In-place sort (requires no additional storage space)
34 | ### Disadvantages
35 | - Doesn't scale well: O(n^2)
36 |
37 | ### Algorithm
38 | 1. Find the minimum value in the list
39 | 2. Swap it with the value in the current position
40 | 3. Repeat this process for all the elements until the entire array is sorted This algorithm is called selection sort since it repeatedly selects the smallest element.
41 |
42 | ### Implementation
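
A minimal sketch in Java:

```java
public static void selectionSort(int[] arr) {
    for (int i = 0; i < arr.length - 1; i++) {
        int min = i;
        for (int j = i + 1; j < arr.length; j++) {
            if (arr[j] < arr[min]) min = j; // index of the smallest remaining element
        }
        int tmp = arr[i]; // one swap per position, as noted above
        arr[i] = arr[min];
        arr[min] = tmp;
    }
}
```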
43 |
44 | ### Performance
45 |
46 | | Type | Complexity |
47 | | ----------------------------------------- | ---------------- |
48 | | Worst case complexity                      | O(n^2)           |
49 | | Best case complexity                       | O(n^2)           |
50 | | Average case complexity                    | O(n^2)           |
51 | | Worst case space complexity | O(1) auxiliary |
52 |
53 |
54 |
55 | ## Insertion Sort
56 |
57 | Insertion sort is a simple and efficient comparison sort. In this algorithm, each iteration removes an element from the input data and inserts it into the correct position in the list being sorted. The choice of the element being removed from the input is random and this process is repeated until all input elements have gone through.
58 |
59 | ### Advantages
60 | - Simple implementation
61 | - Efficient for small data
62 | - Adaptive: If the input list is presorted [may not be completely] then insertion sort takes O(n + d), where d is the number of inversions
63 | - Practically more efficient than selection and bubble sorts, even though all of them have O(n^2) worst case complexity
64 | - Stable: Maintains the relative order of the input data if the keys are the same
65 | - In-place: It requires only a constant amount O(1) of additional memory space
66 | - Online: Insertion sort can sort the list as it receives it
67 |
68 | ### Algorithm
69 |
70 | Every repetition of insertion sort removes an element from the input data, and inserts it into the correct position in the already-sorted list until no input elements remain. Sorting is typically done in-place. The resulting array after k iterations has the property where the first k + 1 entries are sorted.
71 |
72 |
73 |
74 | Each element greater than x is copied to the right as it is compared against x.
75 |
76 | ### Implementation
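
A minimal sketch in Java; each element greater than the key is shifted one slot right as it is compared against the key:

```java
public static void insertionSort(int[] arr) {
    for (int i = 1; i < arr.length; i++) {
        int key = arr[i]; // element to insert into the sorted prefix arr[0..i-1]
        int j = i - 1;
        while (j >= 0 && arr[j] > key) {
            arr[j + 1] = arr[j]; // shift larger elements one slot to the right
            j--;
        }
        arr[j + 1] = key; // drop the key into its correct slot
    }
}
```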
77 |
78 |
79 |
80 |
81 |
82 | ### Example
83 | Given an array: 6 8 1 4 5 3 7 2 and the goal is to put them in ascending order.
84 |
85 |
86 |
87 | ### Analysis
88 |
89 |
90 | **Worst case analysis**
91 |
92 | Worst case occurs when for every i the inner loop has to move all elements A[1], . . . , A[i – 1](which happens when A[i] = key is smaller than all of them), that takes Θ(i – 1) time.
93 |
94 |
95 |
96 | **Average case analysis**
97 |
98 | For the average case, the inner loop will insert A[i] in the middle of A[1], . . . , A[i – 1]. This takes Θ(i/2) time.
99 |
100 |
101 |
102 |
103 |
104 | ### Performance
105 |
106 | If every element is greater than or equal to every element to its left, the running time of insertion sort is Θ(n). This situation occurs if the array starts out already sorted, and so an already-sorted array is the best case for insertion sort.
107 |
108 |
109 | | Type | Complexity |
110 | | ----------------------------------------- | ---------------- |
111 | | Worst case complexity                      | O(n^2)                      |
112 | | Best case complexity                       | Θ(n)                        |
113 | | Average case complexity                    | O(n^2)                      |
114 | | Worst case space complexity                | O(n) total, O(1) auxiliary  |
115 |
116 |
117 | ### Comparisons to Other Sorting Algorithms
118 |
119 | Insertion sort is one of the elementary sorting algorithms with O(n^2) worst-case time. Insertion sort is used when the data is nearly sorted (due to its adaptiveness) or when the input size is small (due to its low overhead). For these reasons and due to its stability, insertion sort is used as the recursive base case (when the problem size is small) for higher-overhead divide-and-conquer sorting algorithms, such as merge sort or quicksort.
120 |
121 | **Notes:**
122 | - Bubble sort takes roughly n^2/2 comparisons and n^2/2 swaps (inversions) in both the average case and the worst case.
123 | - Selection sort takes roughly n^2/2 comparisons and n swaps.
124 | - Insertion sort takes roughly n^2/4 comparisons and n^2/8 swaps in the average case, and in the worst case they are double.
125 | - Insertion sort is almost linear for partially sorted input.
126 | - Selection sort best suits elements with bigger values and small keys.
127 |
128 |
129 | ## Shell Sort
130 |
131 | ## Merge Sort
132 |
133 | ## Heap Sort
134 |
135 | ## Quicksort
136 |
137 | ## Tree Sort
138 |
139 | ## Comparison of Sorting Algorithms
140 |
141 |
142 |
--------------------------------------------------------------------------------
/ds-algo/data-structure.md:
--------------------------------------------------------------------------------
1 | Data Structures
2 | ===============
3 |
4 |
5 | - [Lists](#lists)
6 | * [ArrayList](https://github.com/sunilsoni/interview-notes/blob/main/ds-algo/data-structure.md#arraylist)
7 | * [LinkedList](#linkedlist)
8 | * [Stack](#stack)
9 | * [Vector](#vector)
10 | * [CopyOnWriteArrayList](#copyonwritearraylist)
11 | * [Collections.synchronizedList](#collectionssynchronizedlist)
12 | - [Sets](#sets)
13 | * [HashSet](#hashset)
14 | * [LinkedHashSet](#linkedhashset)
15 | * [TreeSet](#treeset)
16 | * [ConcurrentSkipListSet](#concurrentskiplistset)
17 | * [CopyOnWriteArraySet](#copyonwritearrayset)
18 | * [EnumSet](#enumset)
19 | - [Maps](#maps)
20 | * [HashMap](#hashmap)
21 | * [HashMap implementation details](#hashmap-implementation-details)
22 | * [LinkedHashMap](#linkedhashmap)
23 | * [Hashtable](#hashtable)
24 | * [ConcurrentHashMap](#concurrenthashmap)
25 | * [TreeMap](#treemap)
26 | * [ConcurrentSkipListMap](#concurrentskiplistmap)
27 | - [Queues](#queues)
28 | * [LinkedList](#linkedlist-1)
29 | * [ArrayBlockingQueue](#arrayblockingqueue)
30 | * [LinkedBlockingQueue](#linkedblockingqueue)
31 | * [ConcurrentLinkedQueue](#concurrentlinkedqueue)
32 | * [Deque classes](#deque-classes)
33 | * [PriorityQueue](#priorityqueue)
34 | * [PriorityBlockingQueue](#priorityblockingqueue)
35 | * [DelayQueue](#delayqueue)
36 | * [SynchronousQueue](#synchronousqueue)
37 | - [equals and hashCode](#equals-and-hashcode)
38 | - [Collections class](#collections-class)
39 | * [Utility methods](#utility-methods)
40 | * [Methods returning wrapped instances](#methods-returning-wrapped-instances)
41 | - [Hierarchy and classes](#hierarchy-and-classes)
42 |
43 | ---
44 |
45 | Lists
46 | ---------------
47 |
48 | ArrayList
49 | ---------------
50 |
51 | - Backed by array (which are co-located in memory), thus fast iteration and get(i) operation.
52 | - Slow inserts when the backed array is full and has to double in size.
53 | - Fail-fast iterators, which can throw ConcurrentModificationException.
54 | - add(i) at an index is O(n) - when an element is added to the middle of the list, all elements on the right have to be moved (plain add() at the end is O(1) amortized, as in the table below).
55 | - [Use Case](http://stackoverflow.com/a/322742/3494368) - When iterations outnumber number of read/writes.
56 |
57 | LinkedList
58 | ---------------
59 |
60 | - Chain of nodes referencing each other (doubly linked list).
61 | - No co-location of nodes, pointers need to be chased for next element, thus slow iterations and get(i) operation.
62 | - Fail-fast iterators, which can throw ConcurrentModificationException.
63 | - Implements Queue interface, thus allows offer/pop/peek operations.
64 | - Add in the middle is O(1) once the position is reached - adding the element is just adjusting the node pointers (getting to the position is O(n)).
65 | - Internally, get(i) starts traversal from whichever end (head or tail) is closer to the index, to speed up access.
66 | - [Use Case](http://stackoverflow.com/a/322742/3494368) - Lot of inserts in middle of the list.
67 |
68 | | Operation | ArrayList | LinkedList |
69 | |-----------------|-----------------------------------|---------------------------|
70 | | get(i) | O(1) | O(n) |
71 | | add() | O(1) amortized | O(1) |
72 | | remove(i) | O(n) Remove and move all elements | O(n) Iterate then remove |
73 | | iterator.remove | O(n) | O(1) |
74 |
75 | Stack
76 | ------
77 |
78 | - For stack operations push/pop/peek.
79 | - Not used anymore. Recommended to use Deque implementations.
80 |
81 | #### Vector
82 |
83 | - Synchronized version of list.
84 | - Not used anymore; other list alternatives are recommended.
85 |
86 | #### CopyOnWriteArrayList
87 |
88 | - Thread-safe.
89 | - Backed array is copied during every element insert.
90 | - Avoids ConcurrentModificationException since iteration can continue in original copy, and insert results in new copy.
91 | - High memory usage (more pressure on GC) due to the resulting copies.
92 | - Use case - Large number of threads for read, low number of writes.
93 |
94 | #### Collections.synchronizedList
95 |
96 | - Thread-safe.
97 | - Can be slow due to mutual exclusion.
98 | - Iterations have to be externally synchronized by developer
99 | - Can throw ConcurrentModificationException if (above mentioned) synchronization not done during iteration.
100 |
101 | ---
102 |
103 | Sets
104 | --------
105 |
106 | Collection of unique elements. No duplicates.
107 |
108 | #### HashSet
109 |
110 | - Backed by HashMap.
111 | - Performance can vary based on hashCode implementation.
112 | - Constant time get/remove/add/contains (subject to above point).
113 | - Fail-fast iterators.
114 | - Insertion order not retained.
115 |
116 | #### LinkedHashSet
117 |
118 | - Insertion order is retained.
119 | - Uses doubly-linked list to maintain the order.
120 | - Maintaining this list adds a small cost to add/remove; iteration, however, is proportional to the set's size rather than its capacity.
121 | - Other characteristics are the same as HashSet above.
122 |
123 | #### TreeSet
124 |
125 | - Elements sorted by their natural order (or by a Comparator passed in the constructor).
126 | - O(log n) time for add/remove/contains operations.
127 | - Navigable (floor, ceiling, higher, lower, headSet, tailSet operations - see the sketch below).
128 | - Fail-fast iterators.
129 |
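A short sketch of the navigable operations (values are illustrative):

```java
import java.util.TreeSet;

public class TreeSetNavigation {
    public static void main(String[] args) {
        TreeSet<Integer> set = new TreeSet<>();
        for (int i : new int[]{10, 20, 30, 40}) set.add(i);

        System.out.println(set.floor(25));   // 20   - greatest element <= 25
        System.out.println(set.ceiling(25)); // 30   - smallest element >= 25
        System.out.println(set.higher(30));  // 40   - strictly greater than 30
        System.out.println(set.lower(10));   // null - nothing strictly below 10
        System.out.println(set.headSet(30)); // [10, 20] - elements < 30
        System.out.println(set.tailSet(30)); // [30, 40] - elements >= 30
    }
}
```
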
130 | #### ConcurrentSkipListSet
131 |
132 | - Thread-safe.
133 | - O(log n) time for add/remove/contains operations.
134 | - Navigable (floor, ceiling, higher, lower, headSet, tailSet operations).
135 | - The size() method is not a constant-time operation.
136 | - Weakly consistent iterators (do not throw ConcurrentModificationException but also __may not__ reflect concurrently added items).
137 | - Thus, bulk operations (addAll, removeAll, retainAll, containsAll etc) are not guaranteed to be atomic.
138 |
139 | #### CopyOnWriteArraySet
140 |
141 | - Backed by CopyOnWriteArrayList
142 | - Thread-safe.
143 | - Slow; most operations have to iterate through the backing array.
144 | - Recommended where reads vastly outnumber writes and set size is small.
145 |
146 | #### EnumSet
147 |
148 | - To be used with Enum types.
149 | - Very efficient and fast (backed by bit-vectors).
150 | - Weakly consistent iterators.
151 | - Nulls not allowed.
152 |
153 | ---
154 |
155 | Maps
156 | ------
157 |
158 | #### HashMap
159 |
160 | - key, value pairs.
161 | - Permits one null key and any number of null values.
162 | - Iteration order not guaranteed.
163 | - Fail-fast iterators; can throw ConcurrentModificationException.
164 | - [Article detailing implementation](http://www.deepakvadgama.com/blog/java-hashmap-internals/).
165 |
166 | #### HashMap implementation details
167 |
168 | - Backed by array (buckets), array-size is known as table-size.
169 | - Position in array = element-hash % table-size (implemented as a bit-mask - see the sketch below).
170 | - If elements end up in the same bucket, they are added to a linked-list (or, since Java 8, to a balanced red-black tree once the bucket grows past a threshold).
171 | - O(1) access if hashCode properly distributes the values; otherwise O(n) for a linked-list bucket and O(log n) for a tree bucket.
172 | - Load factor - 0.75 by default; decides when the table-size should increase (double).
173 | - Bigger load-factor - more space-efficient, reduced speed (more elements in the same bucket).
174 | - Lower load-factor - less space-efficient, more speed (ideally 1 element per bucket).
175 | - Initial table-size = 16.
176 |
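Since the table-size is a power of two, the modulo above is implemented as a bit-mask. A simplified sketch of the index computation (the real HashMap also spreads the high bits of the hash, as mimicked here):

```java
public class BucketIndexSketch {
    // Simplified version of HashMap's index computation: for a power-of-two
    // table-size n, (n - 1) & hash equals hash % n for non-negative hashes,
    // but avoids a division.
    static int bucketIndex(Object key, int tableSize) {
        int h = key.hashCode();
        h = h ^ (h >>> 16); // fold high bits into low bits, like HashMap.hash()
        return (tableSize - 1) & h;
    }

    public static void main(String[] args) {
        System.out.println(bucketIndex("interview", 16)); // a value in [0, 15]
    }
}
```
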
177 | #### LinkedHashMap
178 |
179 | - Insertion order is retained (or access order, if the accessOrder constructor flag is set).
180 |
181 | #### Hashtable
182 |
183 | - Thread-safe.
184 | - Not used anymore, ConcurrentHashMap recommended.
185 |
186 | #### ConcurrentHashMap
187 |
188 | - Thread-safe.
189 | - Fine-grained locking known as lock striping (the map is divided into segments, each with an associated lock; threads holding different locks don't conflict). Since Java 8, per-bucket CAS and synchronized blocks are used instead of segment locks.
190 | - Improved performance over Hashtable.
191 |
192 | #### TreeMap
193 |
194 | - Sorted by keys.
195 | - Uses Red-Black tree implementation.
196 |
197 | #### ConcurrentSkipListMap
198 |
199 | - Thread-safe version of TreeMap.
200 | - Navigable (floorKey, ceilingKey, higherKey, lowerKey, headMap, tailMap operations).
201 |
202 | ---
203 |
204 | Queues
205 | ---------------
206 |
207 | #### LinkedList
208 |
209 | - Implements Queue interface.
210 | - offer, peek, poll operations.
211 | - Use case - task queues
212 |
213 | #### ArrayBlockingQueue
214 |
215 | - Thread-safe.
216 | - Backed by array. Thus bounded in size.
217 | - put() on a full queue blocks until space is available.
218 | - take() on an empty queue blocks until an element arrives (see the producer-consumer sketch below).
219 | - Use case - Producer consumer problem.
220 |
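A minimal producer-consumer sketch using this blocking behaviour (capacity and counts are arbitrary):

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ProducerConsumerDemo {
    public static void main(String[] args) {
        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(2); // bounded

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    queue.put(i); // blocks while the queue is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        Thread consumer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    System.out.println("consumed " + queue.take()); // blocks while empty
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        producer.start();
        consumer.start();
    }
}
```
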
221 | #### LinkedBlockingQueue
222 |
223 | - Thread-safe.
224 | - Backed by linked-list.
225 | - Optionally bounded in size; a capacity can be passed as a constructor argument (defaults to Integer.MAX_VALUE, i.e. effectively unbounded).
226 |
227 | #### ConcurrentLinkedQueue
228 |
229 | - Thread-safe.
230 | - Uses CAS (compare-and-swap) instead of locks for higher throughput; known as a lock-free algorithm.
231 |
232 | #### Deque classes
233 |
234 | - ArrayDeque - Double ended queue. Backed by array. Can throw ConcurrentModificationException.
235 | - LinkedList - Implements Deque interface.
236 | - LinkedBlockingDeque
237 | - ConcurrentLinkedDeque
238 |
239 | #### PriorityQueue
240 |
241 | - Elements sorted based on their natural order (or Comparator provided in Constructor).
242 | - Use case - task queues where tasks can have different priorities.
243 |
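A small sketch of such a task queue ordered by a Comparator (the Task class and priority values are illustrative):

```java
import java.util.Comparator;
import java.util.PriorityQueue;

public class TaskQueueDemo {
    static class Task {
        final String name;
        final int priority;
        Task(String name, int priority) { this.name = name; this.priority = priority; }
    }

    public static void main(String[] args) {
        // The head of the queue is the task with the smallest priority value.
        PriorityQueue<Task> tasks =
                new PriorityQueue<>(Comparator.comparingInt((Task t) -> t.priority));
        tasks.offer(new Task("backup", 3));
        tasks.offer(new Task("page-oncall", 1));
        tasks.offer(new Task("cleanup", 2));

        while (!tasks.isEmpty()) {
            System.out.println(tasks.poll().name); // page-oncall, cleanup, backup
        }
    }
}
```
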
244 | #### PriorityBlockingQueue
245 |
246 | - Thread-safe.
247 |
248 | #### DelayQueue
249 |
250 | - Elements become available for removal only after their delay has expired.
251 |
252 | #### SynchronousQueue
253 |
254 | - Has no internal capacity; each insert must wait for a corresponding remove by another thread.
255 | - Blocks until both producer and consumer arrive.
256 | - Use case - safe/atomic hand-off of objects between threads.
257 |
258 | ---
259 |
260 | ### equals and hashCode
261 |
262 | - A correct equals() is required for all collections (contains/remove rely on it).
263 | - Both equals() and hashCode() are required for Maps and Sets (which are backed by Maps) - see the sketch below.
264 |
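A minimal sketch of a consistent equals/hashCode pair; without both overrides, the contains() check below would fail for a hash-based set:

```java
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

public class Point {
    private final int x;
    private final int y;

    public Point(int x, int y) { this.x = x; this.y = y; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof Point)) return false;
        Point p = (Point) o;
        return x == p.x && y == p.y;
    }

    @Override
    public int hashCode() {
        // Must be consistent with equals: equal objects -> equal hash codes.
        return Objects.hash(x, y);
    }

    public static void main(String[] args) {
        Set<Point> set = new HashSet<>();
        set.add(new Point(1, 2));
        System.out.println(set.contains(new Point(1, 2))); // true
    }
}
```
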
265 | ---
266 |
267 | Collections class
268 | ---------------
269 |
270 | #### Utility methods
271 |
272 | - sort(list, comparator) - guarantees a stable sort
273 | - reverse
274 | - reverseOrder - returns Comparator for reversed order
275 | - shuffle
276 | - rotate(list, distance) - rotates elements by the distance specified
277 | - binarySearch(list, key)
278 | + list should be sorted else can get unpredictable results
279 | + log(n) if list implements RandomAccess, else O(n)
280 | + RandomAccess - marker interface indicating that the collection supports fast random access, get(i); typically backed by arrays.
281 |
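A short sketch exercising a few of these utilities (values are arbitrary):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class CollectionsUtilDemo {
    public static void main(String[] args) {
        List<Integer> list = new ArrayList<>(Arrays.asList(5, 1, 4, 2, 3));

        Collections.sort(list); // [1, 2, 3, 4, 5] - stable sort
        System.out.println(Collections.binarySearch(list, 4)); // 3 - needs a sorted list

        Collections.rotate(list, 2);  // [4, 5, 1, 2, 3]
        Collections.reverse(list);    // [3, 2, 1, 5, 4]
        Collections.sort(list, Collections.reverseOrder()); // [5, 4, 3, 2, 1]
        System.out.println(list);
    }
}
```
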
282 | #### Methods returning wrapped instances
283 |
284 | - empty - emptyList, emptySet, emptyMap etc.
285 | - synchronized - synchronizedList, synchronizedSet, synchronizedMap etc.
286 | - unmodifiable - unmodifiableList, unmodifiableSet, unmodifiableMap etc.
287 | - singleton(t) - singleton (returns set), singletonList, singletonMap etc.
288 |
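A small sketch of the wrappers in action; the unmodifiable wrapper rejects mutation at runtime:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class WrappedInstancesDemo {
    public static void main(String[] args) {
        List<String> base = new ArrayList<>(Arrays.asList("a", "b"));

        List<String> readOnly = Collections.unmodifiableList(base);
        try {
            readOnly.add("c"); // every mutator throws
        } catch (UnsupportedOperationException e) {
            System.out.println("unmodifiable list rejected the add");
        }

        List<String> one = Collections.singletonList("only"); // immutable, size 1
        List<String> none = Collections.emptyList();          // immutable, size 0
        System.out.println(one.size() + " " + none.size());   // 1 0
    }
}
```
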
289 | ---
290 |
291 | ### Hierarchy and classes
292 |
293 |
294 |
--------------------------------------------------------------------------------
/ds-algo/images/Arrays.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Arrays.png
--------------------------------------------------------------------------------
/ds-algo/images/Comparison of Linked Lists with Arrays & Dynamic Arrays.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Comparison of Linked Lists with Arrays & Dynamic Arrays.png
--------------------------------------------------------------------------------
/ds-algo/images/Comparison of Sorting Algorithms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Comparison of Sorting Algorithms.png
--------------------------------------------------------------------------------
/ds-algo/images/Insertion Sort Average case analysis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Insertion Sort Average case analysis.png
--------------------------------------------------------------------------------
/ds-algo/images/Insertion Sort Example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Insertion Sort Example.png
--------------------------------------------------------------------------------
/ds-algo/images/Insertion Sort Implementation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Insertion Sort Implementation.png
--------------------------------------------------------------------------------
/ds-algo/images/Insertion Sort Worst case analysis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Insertion Sort Worst case analysis.png
--------------------------------------------------------------------------------
/ds-algo/images/Insertion Sort.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/Insertion Sort.png
--------------------------------------------------------------------------------
/ds-algo/images/LinkedList.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/LinkedList.png
--------------------------------------------------------------------------------
/ds-algo/images/collection-hierarchy-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/collection-hierarchy-2.png
--------------------------------------------------------------------------------
/ds-algo/images/growth rates.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/growth rates.png
--------------------------------------------------------------------------------
/ds-algo/images/map-hierarchy-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/map-hierarchy-2.png
--------------------------------------------------------------------------------
/ds-algo/images/rates of growth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/ds-algo/images/rates of growth.png
--------------------------------------------------------------------------------
/hibernate/hibernate.md:
--------------------------------------------------------------------------------
1 | Hibernate
2 | =================
3 |
4 | Hibernate is an Object Relational Mapping tool (ORM tool), that maps the Java objects to the database tables and vice-versa.
5 |
6 | Some points to remember:
7 | - Hibernate framework provides the facility to create database tables automatically
8 | - Hibernate framework provides us an object-oriented version of SQL known as HQL (Hibernate Query Language). It generates database-independent queries, so even if our database gets changed, we don’t have to change our SQL queries for the new database
9 | - Using Hibernate, we can define relationships between our Entities (tables), that makes it easy to fetch data from multiple tables
10 | - Hibernate supports caching, which improves the performance of our application
11 | - Using Hibernate, we can generate the Primary key of our tables automatically
12 |
13 |
14 | JPA vs Hibernate
15 | -----------------
16 | JPA is just a specification, i.e. it defines a set of concepts that can be implemented by any tool or framework, and Hibernate is one of the implementations of JPA.
17 |
18 |
19 | @Entity annotation
20 | ------------------
21 | @Entity annotation defines that a class can be mapped to a database table. The class fields will be mapped to the columns of the table.
22 |
23 |
24 | @Id & @GeneratedValue
25 | ------------------
26 | @Id annotation defines the primary key of a table and @GeneratedValue annotation is used to specify the primary key generation strategy to use. If the strategy is not specified, the default strategy AUTO will be used.
27 |
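A minimal sketch of an entity using these annotations (the Employee class and its fields are illustrative; the javax.persistence namespace is assumed):

```java
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;

@Entity // maps this class to a database table
public class Employee {

    @Id // primary key of the table
    @GeneratedValue(strategy = GenerationType.AUTO) // the default strategy
    private Long id;

    private String name; // mapped to a column of the same name

    public Long getId() { return id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}
```
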
28 | get() and load() methods of Hibernate Session
29 | ------------------
30 |
31 | Hibernate Session class provides two methods to access object, session.get() and session.load()
32 |
33 | The differences are:
34 | - get() involves a database hit if the object does not exist in the Session cache, and it returns a fully initialized object, which may involve several database calls; load() returns a proxy object and only hits the database if any method other than getId() is called on the entity object
35 | - load() results in slightly better performance as it can return a proxy object; it will only hit the database when a non-identifier getter is called, whereas get() returns a fully initialized object when it is not in the Session cache, which may involve multiple database calls based on entity relationships
36 | - get() returns null if the object is not found in the cache as well as the database, whereas load() will throw ObjectNotFoundException but never return null
37 | - If you are not sure whether the object exists, use get() as it will return null; if you are sure that the object exists, use load() as it returns a lazily initialized proxy (see the sketch below)
38 |
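A sketch of the behavioural difference, assuming Hibernate 5's typed Session API and the illustrative Employee entity from the previous section:

```java
import org.hibernate.Session;

public class GetVsLoadDemo {
    static void demo(Session session) {
        // get(): hits the database (if the object is not in the Session cache)
        // and returns a fully initialized object, or null if the row is absent.
        Employee e1 = session.get(Employee.class, 1L);
        System.out.println(e1 == null ? "absent" : e1.getName());

        // load(): returns a proxy immediately - no database hit yet.
        Employee e2 = session.load(Employee.class, 2L);
        e2.getId();   // still no database hit; the proxy already knows the id
        e2.getName(); // first real access hits the database now, and throws
                      // ObjectNotFoundException if the row does not exist
    }
}
```
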
39 |
40 | save(), saveOrUpdate() and persist() method of Hibernate Session
41 | ------------------
42 | Hibernate Session class provides various methods to save an object into the database, like save(), saveOrUpdate() and persist()
43 |
44 | The difference between save() and saveOrUpdate() method is that save() method saves the record into the database by INSERT query, generates a new identifier and returns the Serializable identifier back, while saveOrUpdate() method either INSERT the record or UPDATE the record if it already exists, so it involves extra processing to find whether the record already exists in the table or not.
45 | Similar to save(), persist() method is also used to save the record into the database table.
46 |
47 | The differences between save() and persist() are:
48 |
49 | - Return type of persist() method is void while return type of save() method is Serializable object
50 | - Both persist() and save() methods makes a transient instance persistent. But persist() method does not guarantee that the identifier value will be assigned to the persistent instance immediately, the assignment might happen at flush time
51 | - Both behave differently when they are executed outside the transaction boundaries. persist() method ensures that it will not execute an INSERT when it is called outside of a transaction boundary whereas save() method does not guarantee this, it returns an identifier and if an INSERT query has to be executed to get the identifier then this INSERT happens immediately and it does not matter if the save() is called inside or outside of a transaction
52 | - persist() method is useful in long-running conversation with an extended Session context because it does not execute an INSERT outside of a transaction. On the other hand, save() method is not good in a long-running conversation with an extended Session context
53 |
54 |
55 | Session and SessionFactory
56 | ------------------
57 | SessionFactory creates and manages the Session objects.
58 |
59 | **Some points about SessionFactory:**
60 | - it is one instance per datasource/database
61 | - it is thread-safe
62 | - it is an immutable and heavy-weight object as it maintains Sessions, mappings, hibernate configurations etc.
63 | - SessionFactory provides second level cache in hibernate also called application-level cache
64 |
65 | **Some points about Session:**
66 | - Session objects are created using sessionFactory.openSession()
67 | - It is one instance per client/thread/transaction
68 | - It is not thread-safe
69 | - It is light-weight
70 | - Session provides first level cache, which is short-lived
71 |
72 | First Level and Second Level Cache
73 | ------------------
74 | Hibernate framework provides caching at two levels, first-level cache which is at the Session level and second-level cache which is at the application level.
75 |
76 | **The first level cache** minimizes the database access for the same object if it is requested from the same Session. The first level cache is by default enabled. When you call session.get() method then it hits the database, and while returning, it also saves this object in the first-level cache. So, the subsequent requests for this same object from the same session will not hit the database and the object from cache will be used.
77 |
78 | But, since this cache is associated with the Session object, which is a short-lived object in Hibernate, as soon as the session is closed, all the information held in the cache is also lost. So, if we try to load the same object using the get() method, Hibernate will go to the database again and fetch the record.
79 |
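A small sketch of the first-level cache at work, again assuming the illustrative Employee entity; the second get() in the same Session causes no database hit:

```java
import org.hibernate.Session;

public class FirstLevelCacheDemo {
    static void demo(Session session) {
        // First call: hits the database and stores the entity
        // in the Session's first-level cache.
        Employee first = session.get(Employee.class, 1L);

        // Second call in the same Session: served from the cache -
        // no database hit, and it is the very same object instance.
        Employee second = session.get(Employee.class, 1L);
        System.out.println(first == second); // true
    }
}
```
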
80 | This poses a significant performance challenge in an application where multiple sessions are used. Hibernate provides the second-level cache for this, and it can be shared among multiple sessions.
81 |
82 | **The second level cache** is maintained at the SessionFactory level, this cache is by default disabled, to enable second level cache in hibernate, it needs to be configured in hibernate configuration file, i.e. hibernate.cfg.xml file. There are various providers of second level cache, like EhCache, OSCache etc.
83 |
84 | Once second level cache is configured, then object request will first go to the first-level cache, if it is not found there, then it will look for this object in second-level cache, if found then it will be returned from the second-level cache and it will also save a copy in first-level cache.
85 |
86 | But, if the object is not found in the second-level cache either, then it will hit the database, and if it is present in the database, the object will be put into both the first and second level caches, so that if any other session requests this object it will be returned from the cache.
87 |
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/kafka/images/kafka-message-flow.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/kafka/images/kafka-message-flow.jpeg
--------------------------------------------------------------------------------
/kafka/kafka.md:
--------------------------------------------------------------------------------
1 | Kafka
2 | =================
3 |
4 | > Apache Kafka is a publish-subscribe messaging system developed by Apache written in Scala. It is a distributed, partitioned and replicated log service.
5 |
6 | The important features are data partitioning, scalability, low-latency, high throughputs, stream processing, durability, zero data loss, etc.
7 |
8 | The main components of Kafka are:
9 |
10 | - **Topic**: A bunch of messages of the same type come under the same topic.
11 | - **Producer**: A producer, as the name suggests, produces messages and can publish to the selected topic.
12 | - **Brokers**: These act as a channel between the producers and consumers. They are a set of servers where the published messages are stored.
13 | - **Consumer**: The consumer is the one who consumes the published data. It can subscribe to different topics and then pull data from the brokers.
14 |
15 |
16 |
17 |
18 |
19 | Advantages of Kafka
20 | -------------------
21 | There are some advantages of Kafka, which makes it significant to use:
22 |
23 | - **High-throughput** : We do not need any large hardware in Kafka, because it is capable of handling high-velocity and high-volume data. Moreover, it can also support message throughput of thousands of messages per second.
24 | - **Low Latency** : Kafka can easily handle messages with very low latency, in the range of milliseconds, as demanded by most of the new use cases.
25 | - **Fault-Tolerant** : Kafka is resistant to node/machine failure within a cluster.
26 | - **Durability** : As Kafka supports messages replication, so, messages are never lost. It is one of the reasons behind durability.
27 | - **Scalability** : Kafka can be scaled out on the fly by adding additional nodes, without incurring any downtime.
28 |
29 | The primary advantages of Kafka include fault-tolerance, higher throughput, scalability, lower latency, and durability. Kafka does not require any large-scale hardware components and shows exceptional performance in the management of high-volume and high-velocity data.
30 |
31 | Most important of all, it can support message throughput at the rate of thousand messages per second. Kafka depicts promising resistance to the failure of nodes or machines within a cluster. Lower latency of Kafka can help in easily managing the messages within milliseconds. In addition, Kafka also ensures message replication, thereby reducing any concerns of message loss. Another critical benefit of Apache Kafka is the scalability that it ensures through the addition of more nodes.
32 |
33 |
34 | Offset in Kafka
35 | ---------------
36 |
37 | - There is a sequential ID number given to the messages in a partition, called an offset. These offsets uniquely identify each message within its partition.
38 | - An offset is meaningful only within its own partition; a record is fully identified by its topic, partition, and offset.
39 |
40 | Consumer group in Kafka
41 | -----------------------
42 |
43 | A Consumer group is made up of one or more consumers that together subscribe to the different topics and fetch data from the brokers.
44 |
45 | ZooKeeper in Kafka
46 | ---------------
47 |
48 | Apache Kafka is a distributed system that is built to use Zookeeper. Zookeeper’s main role here is to coordinate between different nodes in a cluster. We also use Zookeeper to recover from a previously committed offset if any node fails, because offsets are committed to Zookeeper periodically.
49 |
50 | In addition, Zookeeper also helps in leader detection, configuration management, synchronization, and detecting any node leaving or joining the cluster. Furthermore, Kafka implements Zookeeper as storage for offsets of consumed messages regarding a specific topic. Zookeeper also helps in partitioning the offsets of messages according to specific Consumer Groups.
51 |
52 | Can I use Kafka without Zookeeper?
53 | -----------------------
54 |
55 | No. You cannot bypass Zookeeper for a direct connection with the Kafka server. It is also essential to note that servicing client requests becomes impossible when Zookeeper is experiencing downtime.
58 |
59 | What is Zookeeper in Kafka? Can we use Kafka without Zookeeper?
60 | -----------------------
61 | Zookeeper is an open source, high-performance co-ordination service used for distributed applications adapted by Kafka.
62 |
63 | No, it is not possible to bypass Zookeeper and connect straight to the Kafka broker. Once Zookeeper is down, Kafka cannot serve client requests.
64 |
65 | - Zookeeper is basically used to communicate between different nodes in a cluster
66 | - In Kafka, it is used to commit offset, so if node fails in any case it can be retrieved from the previously committed offset
67 | - Apart from this it also does other activities like leader detection, distributed synchronization, configuration management, identifies when a new node leaves or joins, the cluster, node status in real time, etc.
68 |
69 |
70 | Partition in Kafka
71 | ---------------
72 | Every Kafka broker hosts a few partitions, and each partition can be either the leader or a replica of a topic.
73 |
74 | - Topic partition is the unit of parallelism in Kafka. On both the producer and the broker side, writes to different partitions can be done fully in parallel. On the consumer side, Kafka always gives a single partition’s data to one consumer thread. Thus, the degree of parallelism in the consumer (within a consumer group) is bounded by the number of partitions being consumed. Therefore, in general, the more partitions there are in a Kafka cluster, the higher the throughput one can achieve.
75 |
76 | - How many partitions are there inside each Topic? That's configurable.
77 | You can increase the partition count, but once increased, you cannot decrease it. Apache Kafka provides the alter command to change Topic behavior and add/modify configurations. We will be using the alter command to add more partitions to an existing Topic.
78 |
79 | Here is the command to increase the partitions count for topic 'my-topic' to 20 -
80 |
81 | > ./bin/kafka-topics.sh --alter --zookeeper localhost:2181 --topic my-topic --partitions 20
82 |
83 | You can verify whether partitions have been increased by using describe command as follows -
84 |
85 | > ./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-topic
86 |
87 |
88 | APIs of Kafka
89 | ---------------
90 | Apache Kafka has 4 main APIs:
91 |
92 | 1. Producer API
93 | 2. Consumer API
94 | 3. Streams API
95 | 4. Connector API
96 |
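A minimal sketch of the Producer API using the Java client (the broker address and topic name are placeholders):

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class SimpleProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");

        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // Records with the same key always land in the same partition.
            producer.send(new ProducerRecord<>("my-topic", "key-1", "hello kafka"));
        }
    }
}
```
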
97 |
98 |
99 | Load balancing of the server in Kafka
100 | ---------------
101 | In every partition of Kafka, there is one server which acts as the Leader, and zero or more servers play the role of Followers.
102 |
103 | The main role of the Leader is to perform all read and write requests for the partition, whereas the Followers passively replicate the Leader. Hence, if the Leader fails, one of the Followers takes over the role of Leader. This entire process ensures load balancing of the servers.
104 |
105 | Retention period in Kafka cluster
106 | ---------------
107 |
108 | The retention period retains all published records within the Kafka cluster, regardless of whether they have been consumed. Records older than the configured retention period are discarded, which frees up space.
109 |
110 | The maximum size of a message that can be received by Kafka is approximately 1,000,000 bytes (about 1 MB by default).
111 |
112 | RabbitMQ vs Apache Kafka
113 | ---------------
114 |
115 | - **Features**
116 | - Apache Kafka– Kafka is distributed, durable and highly available, here the data is shared as well as replicated.
117 | - RabbitMQ– RabbitMQ does not provide these features to the same degree out of the box.
118 |
119 | - **Performance rate**
120 | - Apache Kafka– To the tune of 100,000 messages/second.
121 | - RabbitMQ- In case of RabbitMQ, the performance rate is around 20,000 messages/second.
122 |
123 | Explain Apache Kafka Use Cases?
124 | ---------------
125 | Apache Kafka has so many use cases, such as:
126 |
127 | - **Kafka Metrics** : Kafka can be used for operational monitoring data, aggregating statistics from distributed applications to produce centralized feeds of operational data.
128 | - **Kafka Log Aggregation** : Kafka can be used to gather logs from multiple services across an organization.
129 | - **Stream Processing** : Kafka’s strong durability is very useful for stream processing.
130 |
131 | Kafka Cluster, and its key benefits
132 | ---------------
133 | Kafka cluster is a group containing more than one broker. It has zero downtime during the expansion of clusters and can help in the replication of message data and management of persistence.
134 |
135 | The cluster-centric design of the Kafka cluster improves durability. Most important of all, one of the brokers in a cluster manages the states of replicas and partitions. The concerned broker is also responsible for performing administrative tasks such as the reassignment of partitions.
136 |
137 | Replicas in Kafka
138 | ---------------
139 | Replicas in Kafka are basically a list of nodes that replicate the log for a specific partition without considering whether the nodes serve as the Leader. Replicas are highly significant in Kafka because of the safety of published messages. Replication ensures that users can consume published messages even in circumstances such as program error, regular software updates, or machine errors.
140 |
141 | Starting a Kafka server
142 | ---------------
143 | Since Kafka uses ZooKeeper, it is essential to initialize the ZooKeeper server, and then fire up the Kafka server.
144 |
145 | 1. To start the ZooKeeper server:
146 | > bin/zookeeper-server-start.sh config/zookeeper.properties
147 | 2. Next, to start the Kafka server:
148 | > bin/kafka-server-start.sh config/server.properties
149 |
150 | In the Producer, when does QueueFullException occur?
151 | ---------------
152 | QueueFullException typically occurs when the Producer attempts to send messages at a pace that the Broker cannot handle. Since the Producer doesn’t block, users will need to add enough brokers to collaboratively handle the increased load.
153 |
154 |
155 | Confluent Kafka vs. Apache Kafka
156 | ---------------
157 |
158 | - **Performance**
159 | - Confluent Kafka performs really well, and even under the higher workloads, its performance is unwavering.
160 | - On the other hand, though Apache Kafka performs well, it still lags behind Confluent Kafka’s performance.
161 |
162 | - **Pros and Cons**
163 |
164 | - **Confluent Kafka Pros**
165 | - It has almost all the attributes of Kafka and some extra attributes as well.
166 | - It streamlines the admin operations procedures with much ease.
167 | - It takes the burden of worrying about data relaying, off the data managers.
168 | - **Confluent Kaka Cons**
169 | - Confluent Kafka is created by using Apache Kafka, and hence the scope of tweaking it further is limited.
170 | - Confluent Kafka’s fault-tolerant capabilities may be questioned in some cases.
171 |
172 | - **Apache Kafka Pros**
173 | - Apache Kafka is an open-source platform.
174 | - It allows you to have the flexibility and features to tweak the code as per your requirements.
175 | - It is known for its fault tolerance and durability.
176 | - It is easily accessible and gives you real-time feedback.
177 | - **Apache Kafka Cons**
178 | - It is only a pub-sub platform and doesn’t have the entire data processing and data operations tools.
179 | - In some cases, if the workload goes too high, it tends to behave in an awry manner.
180 | - You cannot use the point-to-point and request/reply messages in Apache Kafka.
181 |
182 | - **Pricing**
183 | - The pricing model of Confluent Kafka is based on cloud usage, and typically it costs you around $0.11 per GB. Usage is calculated based on the data stored on Confluent Cloud.
184 | - Apache Kafka is an open-source platform that you can use for free, but you need to store the data on your cloud/on-premise platforms.
185 |
186 | Confluent Kafka has far more capabilities than Apache Kafka, but you need to pay to use Confluent Kafka.
187 | But, Apache Kafka is free of cost, and you can make the tweaks as per your requirements on the platforms too.
188 |
189 |
190 |
191 | For more information:
192 | 1. [Top 30 Apache Kafka Interview Questions](https://www.whizlabs.com/blog/apache-kafka-interview-questions/)
193 | 2. [Apache Kafka Tutorial](https://www.javatpoint.com/apache-kafka)
194 |
195 |
196 |
197 |
--------------------------------------------------------------------------------
/maven/maven.md:
--------------------------------------------------------------------------------
1 |
2 | Maven
3 | =====
4 |
5 | Maven is a tool that is used for building and managing any Java based project. It is a powerful project management tool that is based on POM (Project Object Model). It simplifies the build process.
6 |
7 | pom.xml
8 | -------
9 | POM stands for Project Object Model; it is an XML file which contains the configuration information related to the project. Maven uses this file to build the project. We specify all the dependencies that are needed for a project, the plugins, goals, etc. By using the `<packaging>` tag, we can specify whether we need to build the project into a JAR/WAR etc.
10 |
11 | Maven build life-cycle
12 | ----------------------
13 |
14 | Maven build life-cycle is made up of phases
15 | - **validate**: validate the project is correct and all necessary information is available
16 | - **compile**: compile the source code of the project
17 | - **test**: test the compiled source code using a suitable unit testing framework. These tests should not require the code to be packaged or deployed
18 | - **package**: take the compiled code and package it in its distributable format, such as a JAR
19 | - **verify**: run any checks on results of integration tests to ensure quality criteria are met
20 | - **install**: install the package into the local repository, for using as a dependency in other projects locally
21 | - **deploy**: done in the build environment, copies the final package to the remote repository for sharing with other developers and projects
22 |
23 | Maven will first validate the project, then it will try to compile the sources, run the tests against the compiled code, package the binaries (e.g. jar), run integration tests against that package, verify the integration tests, install the verified package to the local repository and then deploy the installed package to a remote repository.
24 | The mvn command can be used to execute these build life-cycle phases. If you run mvn verify, it will execute all the earlier phases in order (validate, compile, test, package) before calling verify. We only need to call the last build phase.
25 | The mvn clean command is used to delete all the project artifacts built by Maven (the /target directory of a project). Generally, this clean command is used with the install/deploy phase, like mvn clean deploy, to cleanly build and deploy artifacts into the shared repository.
26 |
27 |
28 |
--------------------------------------------------------------------------------
/micro-services/Transactions.md:
--------------------------------------------------------------------------------
1 | # Managing transactions
2 |
3 | ## ACID Transactions
4 |
5 | Typically, when we talk about database transactions, we are talking about ACID transactions.
6 |
7 | ACID stands for atomicity, consistency, isolation, and durability, and here is what these properties give us:
8 |
9 | **Atomicity**
10 | Ensures that all operations completed within the transaction either all complete or all fail. If any of the changes
11 | we’re trying to make fail for some reason, then the whole operation is aborted, and it’s as though no changes were ever
12 | made.
13 |
14 | **Consistency**
15 | When changes are made to our database, we ensure it is left in a valid, consistent state.
16 |
17 | **Isolation**
18 | Allows multiple transactions to operate at the same time without interfering. This is achieved by ensuring that any
19 | interim state changes made during one transaction are invisible to other transactions.
20 |
21 | **Durability**
22 | Makes sure that once a transaction has been completed, we are confident the data won’t get lost in the event of some
23 | system failure.
24 |
25 | ## What is a distributed transaction?
26 |
27 | When a microservice architecture decomposes a monolithic system into self-encapsulated services, it can break
28 | transactions. This means a local transaction in the monolithic system is now distributed into multiple services that
29 | will be called in a sequence.
30 |
31 | Here is a customer order example with a monolithic system using a local transaction:
32 |
33 |
34 |
35 |
36 | In the customer order example above, if a user sends a Put Order action to a monolithic system, the system will create a
37 | local database transaction that works over multiple database tables. If any step fails, the transaction can roll back.
38 | This is known as ACID (Atomicity, Consistency, Isolation, Durability), which is guaranteed by the database system.
39 |
40 | When we decomposed this system, we created both the `CustomerMicroservice` and the `OrderMicroservice`, which have
41 | separate databases. Here is a customer order example with microservices:
42 |
43 |
44 |
45 |
46 | When a Put Order request comes from the user, both microservices will be called to apply changes into their own
47 | database. Because the transaction is now across multiple databases, it is now considered a _distributed transaction_.
48 |
49 | **What is the problem?**
50 | In a monolithic system, we have a database system to ensure ACIDity. We now need to clarify the following key problems.
51 |
52 | **How do we keep the transaction atomic?**
53 | In a database system, atomicity means that in a transaction either all steps complete or no steps complete. The
54 | microservice-based system does not have a global transaction coordinator by default. In the example above, if
55 | the `CreateOrder` method fails, how do we roll back the changes we applied by the `CustomerMicroservice`?
56 |
57 | **Do we isolate user actions for concurrent requests?**
58 | If an object is written by a transaction and at the same time (before the transaction ends), it is read by another
59 | request, should the object return old data or updated data? In the example above, once UpdateCustomerFund succeeds but
60 | is still waiting for a response from CreateOrder, should requests for the current customer's fund return the updated
61 | amount or not?
62 |
63 | **Possible solutions**
64 |
65 | The problems above are important for microservice-based systems. Otherwise, there is no way to tell if a transaction has
66 | completed successfully. The following two patterns can resolve the problem:
67 |
68 | 1. 2pc (two-phase commit)
69 | 2. Saga
70 |
71 | ## Two-phase commit (2pc) pattern
72 |
73 | 2pc is widely used in database systems. For some situations, you can use 2pc for microservices. Just be careful; not all
74 | situations suit 2pc and, in fact, 2pc is considered impractical within a microservice architecture (explained below).
75 |
76 | So what is a two-phase commit?
77 |
78 | As its name hints, 2pc has two phases: A prepare phase and a commit phase. In the prepare phase, all microservices will
79 | be asked to prepare for some data change that could be done atomically. Once all microservices are prepared, the commit
80 | phase will ask all the microservices to make the actual changes.
81 |
82 | Normally, there needs to be a global coordinator to maintain the lifecycle of the transaction, and the coordinator will
83 | need to call the microservices in the prepare and commit phases.
84 |
85 | Here is a 2pc implementation for the customer order example:
86 |
87 |
88 |
89 | In the example above, when a user sends a put order request, the Coordinator will first create a global transaction with
90 | all the context information. It will then tell CustomerMicroservice to prepare for updating a customer fund with the
91 | created transaction. The CustomerMicroservice will then check, for example, if the customer has enough funds to proceed
92 | with the transaction. Once CustomerMicroservice is OK to perform the change, it will lock down the object from further
93 | changes and tell the Coordinator that it is prepared. The same thing happens while creating the order in the
94 | OrderMicroservice. Once the Coordinator has confirmed all microservices are ready to apply their changes, it will then
95 | ask them to apply their changes by requesting a commit with the transaction. At this point, all objects will be
96 | unlocked.
97 |
98 | If at any point a single microservice fails to prepare, the Coordinator will abort the transaction and begin the
99 | rollback process. Here is a diagram of a 2pc rollback for the customer order example:
100 |
101 |
102 |
103 | In the above example, the CustomerMicroservice failed to prepare for some reason, but the OrderMicroservice has replied
104 | that it is prepared to create the order. The Coordinator will request an abort on the OrderMicroservice with the
105 | transaction and the OrderMicroservice will then roll back any changes made and unlock the database objects.
106 |
107 | ### Benefits of using 2pc
108 |
109 | 2pc is a very strong consistency protocol. First, the prepare and commit phases guarantee that the transaction is
110 | atomic. The transaction will end with either all microservices returning successfully or all microservices have nothing
111 | changed. Secondly, 2pc allows read-write isolation. This means the changes on a field are not visible until the
112 | coordinator commits the changes.
113 |
114 | ### Disadvantages of using 2pc
115 |
116 | While 2pc has solved the problem, it is not really recommended for many microservice-based systems because 2pc is
117 | synchronous (blocking). The protocol will need to lock the object that will be changed before the transaction completes.
118 | In the example above, if a customer places an order, the "fund" field will be locked for the customer. This prevents the
119 | customer from applying new orders. This makes sense because if a "prepared" object changed after it claims it is "
120 | prepared," then the commit phase could possibly not work.
121 |
122 | This is not good. In a database system, transactions tend to be fast—normally within 50 ms. However, microservices have
123 | long delays with RPC calls, especially when integrating with external services such as a payment service. The lock could
124 | become a system performance bottleneck. Also, it is possible to have two transactions mutually lock each other (
125 | deadlock) when each transaction requests a lock on a resource the other requires.
126 |
127 | ## Saga pattern
128 |
129 | The Saga pattern is another widely used pattern for distributed transactions. It is different from 2pc, which is
130 | synchronous. The Saga pattern is asynchronous and reactive. In a Saga pattern, the distributed transaction is fulfilled
131 | by asynchronous local transactions on all related microservices. The microservices communicate with each other through
132 | an event bus.
133 |
134 | Here is a diagram of the Saga pattern for the customer order example:
135 |
136 |
137 |
138 | In the example above, the OrderMicroservice receives a request to place an order. It first starts a local transaction to
139 | create an order and then emits an OrderCreated event. The CustomerMicroservice listens for this event and updates a
140 | customer fund once the event is received. If a deduction is successfully made from a fund, a CustomerFundUpdated event
141 | will then be emitted, which in this example means the end of the transaction.
142 |
143 | If any microservice fails to complete its local transaction, the other microservices will run compensation transactions
144 | to roll back the changes. Here is a diagram of the Saga pattern for a compensation transaction:
145 |
146 |
147 |
148 | In the above example, the UpdateCustomerFund failed for some reason and it then emitted a CustomerFundUpdateFailed
149 | event. The OrderMicroservice listens for the event and starts its compensation transaction to revert the order that was
150 | created.
151 |
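A minimal, framework-free sketch of this compensation flow; the in-memory event bus, the event names, and the order id are illustrative stand-ins for a real broker such as Kafka:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Illustrative in-memory event bus; a real system would publish to Kafka, AMQP, etc.
class EventBus {
    private final Map<String, List<Consumer<String>>> handlers = new HashMap<>();

    void subscribe(String event, Consumer<String> handler) {
        handlers.computeIfAbsent(event, e -> new ArrayList<>()).add(handler);
    }

    void publish(String event, String payload) {
        handlers.getOrDefault(event, new ArrayList<>()).forEach(h -> h.accept(payload));
    }
}

public class OrderSagaSketch {
    public static void main(String[] args) {
        EventBus bus = new EventBus();

        // OrderMicroservice side: compensation - revert the order on failure.
        bus.subscribe("CustomerFundUpdateFailed",
                orderId -> System.out.println("compensating: reverting order " + orderId));

        // CustomerMicroservice side: local transaction that fails in this run.
        bus.subscribe("OrderCreated", orderId -> {
            boolean fundUpdated = false; // pretend the fund deduction failed
            bus.publish(fundUpdated ? "CustomerFundUpdated"
                                    : "CustomerFundUpdateFailed", orderId);
        });

        // OrderMicroservice: local transaction, then emit the event.
        System.out.println("creating order 42");
        bus.publish("OrderCreated", "42");
    }
}
```
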
152 | ### Advantages of the Saga pattern
153 |
154 | One big advantage of the Saga pattern is its support for long-lived transactions. Because each microservice focuses only
155 | on its own local atomic transaction, other microservices are not blocked if a microservice is running for a long time.
156 | This also allows transactions to continue waiting for user input. Also, because all local transactions are happening in
157 | parallel, there is no lock on any object.
158 |
159 | ### Disadvantages of the Saga pattern
160 |
161 | The Saga pattern is difficult to debug, especially when many microservices are involved. Also, the event messages could
162 | become difficult to maintain if the system gets complex. Another disadvantage of the Saga pattern is it does not have
163 | read isolation. For example, the customer could see the order being created, but in the next second, the order is
164 | removed due to a compensation transaction.
165 |
166 | **Adding a process manager**
167 | To address the complexity issue of the Saga pattern, it is quite normal to add a process manager as an orchestrator. The
168 | process manager is responsible for listening to events and triggering endpoints.
169 |
170 | Reference:
171 |
172 | https://developers.redhat.com/blog/2018/10/01/patterns-for-distributed-transactions-within-a-microservices-architecture#possible_solutions
173 |
174 | https://dzone.com/articles/practical-transaction-handling-in-microservice-arc
175 |
--------------------------------------------------------------------------------
/micro-services/images-ms/2pc implementation for the customer order .png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/2pc implementation for the customer order .png
--------------------------------------------------------------------------------
/micro-services/images-ms/2pc rollback for the customer order .png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/2pc rollback for the customer order .png
--------------------------------------------------------------------------------
/micro-services/images-ms/API-Gateway.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/API-Gateway.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Beehive-Representation-Microservices.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Beehive-Representation-Microservices.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Best Practices in Microservices Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Best Practices in Microservices Architecture.png
--------------------------------------------------------------------------------
/micro-services/images-ms/CircuitBreakerImplementation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/CircuitBreakerImplementation.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Client Crendtials Sequence Diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Client Crendtials Sequence Diagram.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Config Server Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Config Server Architecture.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Different Layers in a Single Microservice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Different Layers in a Single Microservice.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Encoded JWT Claim.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Encoded JWT Claim.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Eureka Registry Screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Eureka Registry Screenshot.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Five Grants in OAuth 2.0 protocol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Five Grants in OAuth 2.0 protocol.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Four different roles in OAuth 2.0 protocol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Four different roles in OAuth 2.0 protocol.png
--------------------------------------------------------------------------------
/micro-services/images-ms/High Level Eureka Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/High Level Eureka Architecture.png
--------------------------------------------------------------------------------
/micro-services/images-ms/OAuth2 Use in Microservices Context.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/OAuth2 Use in Microservices Context.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Saga pattern for a compensation transaction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Saga pattern for a compensation transaction.png
--------------------------------------------------------------------------------
/micro-services/images-ms/Saga pattern for the customer order.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/Saga pattern for the customer order.png
--------------------------------------------------------------------------------
/micro-services/images-ms/customer order example with microservices.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/customer order example with microservices.png
--------------------------------------------------------------------------------
/micro-services/images-ms/microservice-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/microservice-architecture.png
--------------------------------------------------------------------------------
/micro-services/images-ms/monolithic system using a local transactio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/micro-services/images-ms/monolithic system using a local transactio.png
--------------------------------------------------------------------------------
/misc/GoodPractices.md:
--------------------------------------------------------------------------------
1 | Good Practices
2 | =============
3 |
4 |
5 | Good Practices for REST API Design
6 | -----------------------------------
7 |
8 | A REST API is an application programming interface that conforms to specific architectural constraints, like stateless
9 | communication and cacheable data. It is not a protocol or standard. While REST APIs can be accessed through a number of
10 | communication protocols; most commonly, they are called over HTTPS.
11 |
12 |
13 | Accept and respond with JSON
14 | ----------------------------
15 | REST APIs should accept JSON for request payloads and also send responses as JSON. JSON is the standard for transferring
16 | data. Almost every networked technology can use it
17 |
18 | To make sure that when our REST API app responds with JSON that clients interpret it as such, we should
19 | set `Content-Type` in the response header to `application/json`
20 |
21 | Use nouns instead of verbs in endpoint paths
22 | -----------------------------------
23 | We shouldn’t use verbs in our endpoint paths. Instead, we should use nouns that represent the entity the
24 | endpoint retrieves or manipulates as the pathname.
25 |
26 | We should create routes like the following (see the controller sketch below):
27 |
28 | - GET /articles/ for getting news articles.
29 | - POST /articles/ for adding a new article.
30 | - PUT /articles/:id for updating the article with the given id.
31 | - DELETE /articles/:id for deleting an existing article with the given id.
32 |
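As a sketch, here is what these routes could look like as a Spring MVC controller (Spring Web and the plain string payloads are assumptions; the same shape applies in any framework):

```java
import java.util.Arrays;
import java.util.List;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/articles") // noun, not a verb like /getArticles
public class ArticleController {

    @GetMapping                 // GET /articles
    public List<String> list() {
        return Arrays.asList("article-1", "article-2"); // placeholder data
    }

    @PostMapping                // POST /articles
    public String create(@RequestBody String article) {
        return article; // a real handler would persist and return the new article
    }

    @PutMapping("/{id}")        // PUT /articles/{id}
    public String update(@PathVariable Long id, @RequestBody String article) {
        return id + ":" + article;
    }

    @DeleteMapping("/{id}")     // DELETE /articles/{id}
    public void delete(@PathVariable Long id) {
        // a real handler would delete the article with this id
    }
}
```
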
33 | Nesting resources for hierarchical objects
34 | -----------------------------------
35 | When designing endpoints, it makes sense to group those that contain associated information. That is, if one object can
36 | contain another object, you should design the endpoint to reflect that.
37 |
38 | For example, if we want an endpoint to get the comments for a news article, we should append the /comments path to the
39 | end of the /articles path.
40 |
41 |
42 | Handle errors gracefully and return standard error codes
43 | -----------------------------------
44 | We should handle errors gracefully and return HTTP response codes that indicate what kind of error occurred. This gives
45 | maintainers of the API enough information to understand the problem that’s occurred. We don’t want errors to bring down
46 | our system, so we shouldn’t leave them unhandled - otherwise the API consumer is forced to handle them.
47 |
48 | Common error HTTP status codes include:
49 |
50 | - **400 Bad Request** – This means that client-side input fails validation.
51 | - **401 Unauthorized** – This means the user isn’t authorized to access a resource. It usually returns when the user
52 | isn’t authenticated.
53 | - **403 Forbidden** – This means the user is authenticated, but is not allowed to access a resource.
54 | - **404 Not Found** – This indicates that a resource is not found.
55 | - **500 Internal server error** – This is a generic server error. It probably shouldn’t be thrown explicitly.
56 | - **502 Bad Gateway** – This indicates an invalid response from an upstream server.
57 | - **503 Service Unavailable** – This indicates that something unexpected happened on server side (It can be anything
58 | like server overload, some parts of the system failed, etc.).
59 |
60 | We should be throwing errors that correspond to the problem that our app has encountered. For example, if we want to
61 | reject the data from the request payload, then we should return a 400 response
62 |
63 |
64 | Allow filtering, sorting, and pagination
65 | -----------------------------------
66 | The databases behind a REST API can get very large. Sometimes, there’s so much data that it shouldn’t be returned all at
67 | once because it’s way too slow or will bring down our systems. Therefore, we need ways to filter items.
68 |
69 | We also need ways to paginate data so that we only return a few results at a time. We don’t want to tie up resources for
70 | too long by trying to get all the requested data at once.
71 |
Filtering and pagination both increase performance by reducing the usage of server resources. The more data accumulates
in the database, the more important these features become.
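
A hedged Spring sketch of filter and pagination query parameters (`Article` and `articleService` are assumptions; the handler would sit inside a `@RestController`):

```java
// GET /articles?author=alice&page=0&size=20
@GetMapping("/articles")
public List<Article> getArticles(
        @RequestParam(required = false) String author,  // optional filter
        @RequestParam(defaultValue = "0") int page,     // page number
        @RequestParam(defaultValue = "20") int size) {  // page size
    return articleService.find(author, page, size);
}
```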
74 |
75 |
76 | Maintain Good Security Practices
77 | -----------------------------------
78 |
79 | Most communication between client and server should be private since we often send and receive private information.
80 | Therefore, using SSL/TLS for security is a must.
81 |
People shouldn’t be able to access more information than they requested. For example, a normal user shouldn’t be able to
access another user’s information, nor should they be able to access admins’ data.
84 |
85 | If we choose to group users into a few roles, then the roles should have the permissions that cover all they need and no
86 | more.
87 |
88 | Cache data to improve performance
89 | -----------------------------------
We can add caching to return data from a local memory cache instead of querying the database every time users request
some data. The good thing about caching is that users get data faster. However, the data they get may be outdated. This
can also lead to issues when debugging in production environments, when something goes wrong but we keep seeing old data.
94 |
There are many kinds of caching solutions, like Redis, in-memory caching, etc. We can change the way data is cached as
our needs change.
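
For example, with Spring's cache abstraction (a hedged sketch; `Article` and `ArticleRepository` are assumptions, and `@EnableCaching` must be present on a configuration class):

```java
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;

@Service
public class ArticleService {

    private final ArticleRepository repository;

    public ArticleService(ArticleRepository repository) {
        this.repository = repository;
    }

    // The first call for a given id hits the database; later calls are
    // served from the "articles" cache until the entry is evicted
    @Cacheable("articles")
    public Article findById(Long id) {
        return repository.findById(id).orElseThrow();
    }
}
```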
97 |
98 | Versioning our APIs
99 | --------------------
100 |
We should have different versions of our API if we’re making changes that may break clients. Versioning can be done
according to semantic versioning (for example, 2.0.6 to indicate major version 2 and the sixth patch), as most apps do
nowadays.
104 |
105 | Versioning is usually done with /v1/, /v2/, etc. added at the start of the API path.
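
In Spring, the version prefix can simply be part of the request mapping (a sketch; the class names are illustrative):

```java
@RestController
@RequestMapping("/v1/articles")
public class ArticleControllerV1 { /* original contract */ }

@RestController
@RequestMapping("/v2/articles")
public class ArticleControllerV2 { /* breaking changes live here */ }
```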
106 |
107 |
108 | Java code review checklist
109 | -------------------------
110 |
111 | Code Quality
112 | -------------------------
113 |
114 | - Basic Checks (before)
115 | - The code compiles
116 | - Old unit tests pass
117 | - The code was tested
118 | - The code was developer-tested
119 | - The new code must be covered by unit tests
120 | - Any refactoring must be covered by unit tests
121 | - At least 80% coverage for the code changes
122 | - Clean Code
123 | - Naming Conventions (classes, constants, variables, methods (void vs return), etc)
124 | - No hard-coded variables
125 | - Indentation
126 | - No spelling mistakes
127 | - The code does what it says it does
128 | - The code is easy to read (**Readability**)
129 | - Avoid duplicate code
130 | - Check if the code could be replaced by calling functions in other libraries/components
131 | - Best Practices
132 | - OOP Principles are correctly used
133 | - The code follows the SOLID PRINCIPLES
134 | - Design Patterns
    - Recommend a design pattern when you feel that a pattern could fit there
136 | - Exception Handling
137 | - Ensure that each exception is raised and handled correctly
138 | - Ensure that you have a well-defined exception hierarchy
139 | - Split the exceptions in two types: technical and business
140 |
141 | ### Branching Strategy
142 |
143 | ### Application Structure
144 |
- Ensure that your changes are correctly split across layers and respect the Spring Boot app structure
- Do not mix up a REST service with a web app (like Spring REST with Spring MVC)
147 |
148 | ### Model
149 |
- Understand the meaning of each model you define and use the most appropriate terminology for it: DTO (Form for MVC),
  entity, Value Object, Java Bean
152 | - Place it in the correct package
153 |
154 | ### Rest API
155 |
156 | - Ensure your Rest API respects the REST maturity levels
157 | - Error Codes
158 | - Ensure that in case of any error or exception the API replies with the standard error codes (defined at the system
159 | level)
- Do not write REST APIs that receive requests from different directions
    - Example: if a service facilitates communication with an external 3rd party, do not expose the same service for
      the 3rd party to call back; use a different service for that (e.g. a notification service)
      - Otherwise you can end up having to handle the same exception differently depending on the direction it comes
        from: you have 2 clients and cannot reply with the same error codes (one set should be internal and the other
        specific to the 3rd party)
166 | - DTO
- Use the DTO pattern for passing data between the controller and service layer (at least when using **sensitive** or
  **aggregated** data)
169 |
170 | ### MVC
171 |
172 | - Ensure that your Model-View-Controller parts of the Web Application are compliant to the Best Practices
- JSPs don't contain business logic (no Java code; only JSTL, Spring, or Thymeleaf tags)
174 | - Model doesn't have business logic
175 | - Controller delegates the requests to the business (service) layer, it doesn't have logic
- Use the DTO (classes ending with Form in the case of MVC) pattern for passing data between the controller and service
  layer (at least when using **sensitive** or **aggregated** data)
178 |
179 | ### Performance
180 |
- Release resources (HTTP connections, DB connections, files, any I/O streams)
182 |
183 | ### Security
184 |
### Thread-safety
186 |
187 | - Ensure your code is not a candidate for race conditions and deadlocks
188 |
189 | ### Logging
190 |
191 | ### Alerting
192 |
193 | # Code Quality
194 |
195 | ## Clean Code
196 |
Ensure that the code is as clean as possible. Check also that the SOLID principles are followed. TODO: link to be
provided
199 |
200 | ## Design Patterns
201 |
Recommend a design pattern where appropriate
203 |
204 | ## Naming Conventions
205 |
206 | ## App Structure
207 |
Check that the changes are correctly split across layers and respect the Spring Boot app structure. Do not mix up a
REST service with a web app.
210 |
211 | ## Model
212 |
Understand the meaning and usage of each model and find the most appropriate place for it. TODO: link to be provided
215 |
216 | ## Exception Handling
217 |
218 | Ensure that each exception is raised and handled correctly
219 |
220 | ## Unit Testing
221 |
Require a minimum of 80% coverage. Ensure that all corner cases are covered and that the tests follow the unit testing standards.
223 |
224 | ## Rest API
225 |
226 | Ensure that the API changes respect the REST maturity levels
227 |
228 | ## Error Codes
229 |
230 | Ensure that in case of a program error or exception, the API replies with the standard error codes
231 |
232 | ## MVC
233 |
234 | Ensure that the Model-View-Controller parts of the Web Application are compliant with the Best Practices
235 |
236 |
237 | For more information:
238 |
239 | 1. [Best practices for REST API design](https://stackoverflow.blog/2020/03/02/best-practices-for-rest-api-design/#h-name-collections-with-plural-nouns)
240 | 2. [REST: Good Practices for API Design](https://medium.com/hashmapinc/rest-good-practices-for-api-design-881439796dc9)
241 |
242 |
243 |
244 | # Resolving a Production Issue on a Live Server
245 |
246 |
247 | ## Notify All Stakeholders
248 |
This can be done in the form of a Slack message to the general channel, an email copying relevant personnel, or through any communication tool that the team uses. The importance of communication is to bring the issue to everyone's attention and to get all hands on deck. Sometimes, someone may simply respond to your message with why the issue is happening (assuming it was a mistake on their part). But the goal is to inform and bring in the members of your team.
250 |
251 |
252 | ## Reproduce : Replicate the production environment locally
253 |
254 | - Replicate locally and step through code to figure out what is going on.
- Add logs and replicate in production, then use logs to debug. This is only feasible if it is easy to do additional deployments that contain the logging code, and if the issue is in fact reproducible.
256 | - Write similar code to what’s running in production (often starting with a copy/pasted variant of the production code), and try to replicate and isolate the issue that way.
257 |
258 | The objective here is to reproduce the exact same bug that exists in the production environment in a more accessible location. In the local environment, we can change data, slow down the execution, and write tons of logs without users being impacted in any way.
259 |
260 |
261 | ## Root Cause Analysis & Fix
262 |
Logging and debuggers are your best friends here. If you’re dealing with a large codebase or are new to the project, just attach a debugger and step through the navigation and triggering actions. Otherwise, add logs around the suspicious code and perform the steps. You’ll eventually find the piece of code that’s causing the issue.
264 |
265 | ## Re-test & Regression Testing
Re-test the steps you found earlier that caused the bug, and run regression tests to confirm the fix hasn't broken anything else.
267 |
268 | ## Backup the System Before Implementing Complex Solution
269 |
270 | ## Document the Problem and How it was Resolved
271 |
Add the problem and the steps you took toward resolving it to the engineering runbook. The importance of this is that it will help the team recover quickly when/if it happens next time. It also serves as a reference when the team wants to rebuild the system to be resistant to such issues.
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
--------------------------------------------------------------------------------
/misc/http.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # HTTP methods overview
4 |
5 |
6 |
| Method  | Scope      | Semantics                                             | Idempotent | Safe |
|---------|------------|-------------------------------------------------------|------------|------|
| GET     | collection | Retrieve all resources in a collection                | Yes        | Yes  |
| GET     | resource   | Retrieve a single resource                            | Yes        | Yes  |
| HEAD    | collection | Retrieve all resources in a collection (headers only) | Yes        | Yes  |
| HEAD    | resource   | Retrieve a single resource (headers only)             | Yes        | Yes  |
| POST    | collection | Create a new resource in a collection                 | No         | No   |
| PUT     | resource   | Update a resource                                     | Yes        | No   |
| PATCH   | resource   | Partially update a resource                           | No         | No   |
| DELETE  | resource   | Delete a resource                                     | Yes        | No   |
| OPTIONS | any        | Return available HTTP methods and other options       | Yes        | Yes  |
18 |
19 |
20 |
21 | ## Idempotency in HTTP
22 |
23 | Idempotency is a property of HTTP methods.
24 |
25 | A request method is considered "idempotent" if the intended effect on the server of multiple identical requests with that method is the same as the effect for a single such request. Of the request methods defined by this specification, PUT, DELETE, and safe request methods are idempotent.
26 |
27 | Like the definition of safe, the idempotent property only applies to what has been requested by the user; a server is free to log each request separately, retain a revision control history, or implement other non-idempotent side effects for each idempotent request.
28 |
29 | > The term idempotent is used more comprehensively to describe an operation that will produce the same results if executed once or multiple times [...] An idempotent function is one that has the property f(f(x)) = f(x) for any value x.
30 |
31 |
32 | Ref: [4.2.2. Idempotent Methods](https://www.rfc-editor.org/rfc/rfc7231#section-4.2.2)
33 |
34 | ## Safe Methods
35 |
36 | Request methods are considered "safe" if their defined semantics are essentially read-only; i.e., the client does not request, and does not expect, any state change on the origin server as a result of applying a safe method to a target resource.
37 |
38 | This definition of safe methods does not prevent an implementation from including behavior that is potentially harmful, that is not entirely read-only, or that causes side effects while invoking a safe method. What is important, however, is that the client did not request that additional behavior and cannot be held accountable for it.
39 |
40 | Of the request methods defined by this specification, the GET, HEAD, OPTIONS, and TRACE methods are defined to be safe
41 |
42 | Ref: [4.2.1. Safe Methods](https://www.rfc-editor.org/rfc/rfc7231#section-4.2.1)
43 |
44 | Ref: https://stackoverflow.com/questions/45016234/what-is-idempotency-in-http-methods
45 |
46 |
47 | ## Why DELETE method is defined as idempotent
48 |
49 | Consider a client performs a `DELETE` request to delete a resource from the server. The server processes the request, the resource gets deleted and the server returns 204. Then the client repeats the same `DELETE` request and, as the resource has already been deleted, the server returns `404`.
50 |
Despite the different status codes received by the client, the effect produced by a single `DELETE` request is the same
as the effect of multiple `DELETE` requests to the same URI.
53 | Finally, requests with idempotent methods can be repeated automatically if a communication failure occurs before the client is able to read the server's response. The client knows that repeating the request will have the same intended effect, even if the original request succeeded, though the response might be different.
54 |
55 | ## Why PATCH method is not idempotent
56 |
The PATCH method is neither safe nor idempotent. However, to prevent collisions, PATCH requests can be issued in such a way as to be idempotent, as quoted below:
58 |
59 | A PATCH request can be issued in such a way as to be idempotent, which also helps prevent bad outcomes from collisions between two PATCH requests on the same resource in a similar time frame. Collisions from multiple PATCH requests may be more dangerous than PUT collisions because some patch formats need to operate from a known base-point or else they will corrupt the resource. Clients using this kind of patch application SHOULD use a conditional request such that the request will fail if the resource has been updated since the client last accessed the resource. For example, the client can use a strong ETag in an If-Match header on the PATCH request.
60 |
61 |
62 | ## When to Use Put and When Patch
63 |
64 | When a client needs to replace an existing Resource entirely, they can use PUT. When they're doing a partial update, they can use HTTP PATCH.
65 |
66 | For instance, when updating a single field of the Resource, sending the complete Resource representation can be cumbersome and uses a lot of unnecessary bandwidth. In such cases, the semantics of PATCH make a lot more sense.
67 |
68 | Another important aspect to consider here is idempotence. PUT is idempotent; PATCH can be idempotent but isn't required to be. So, depending on the semantics of the operation we're implementing, we can also choose one or the other based on this characteristic.
69 |
70 |
71 | Ref: https://www.baeldung.com/http-put-patch-difference-spring
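
In Spring terms, the two verbs map naturally onto separate handlers (a hedged sketch; `User` and `userService` are assumptions, and the handlers would sit inside a `@RestController`):

```java
// PUT: the client sends the complete representation, which replaces the resource
@PutMapping("/users/{id}")
public User replaceUser(@PathVariable Long id, @RequestBody User user) {
    return userService.replace(id, user);
}

// PATCH: the client sends only the fields to change; the rest stay untouched
@PatchMapping("/users/{id}")
public User patchUser(@PathVariable Long id, @RequestBody Map<String, Object> changes) {
    return userService.merge(id, changes);
}
```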
72 |
73 | ## Why is PUT idempotent?
74 |
According to RFC 2616, PUT is considered idempotent. When you PUT a resource, these two assumptions are in play:
76 |
77 | 1. You are referring to an entity, not to a collection.
78 | 2. The entity you are supplying is complete (the entire entity).
79 |
Let's look at an example.
81 |
82 | ```json
83 | { "username": "skwee357", "email": "skwee357@domain.com" }
84 | ```
85 |
If you POST this document to `/users`, then you might get back an entity such as
87 |
88 | ```json
// created at /users/1
90 |
91 | {
92 | "username": "skwee357",
93 | "email": "skwee357@domain.com"
94 | }
95 | ```
96 |
97 | If you want to modify this entity later, you choose between PUT and PATCH. A `PUT` might look like this:
98 | ```json
99 | PUT /users/1
100 | {
101 | "username": "skwee357",
102 | "email": "skwee357@gmail.com" // new email address
103 | }
104 | ```
105 | You can accomplish the same using `PATCH`. That might look like this:
106 |
107 |
108 | ```json
109 | PATCH /users/1
110 | {
111 | "email": "skwee357@gmail.com" // new email address
112 | }
113 |
114 | ```
Here, the `PUT` included all of the parameters of this user, but the `PATCH` only included the one being modified (email).
116 |
117 | When using PUT, it is assumed that you are sending the complete entity, and that complete entity replaces any existing entity at that URI. In the above example, the PUT and PATCH accomplish the same goal: they both change this user's email address. But PUT handles it by replacing the entire entity, while PATCH only updates the fields that were supplied, leaving the others alone.
118 |
119 | Since PUT requests include the entire entity, if you issue the same request repeatedly, it should always have the same outcome (the data you sent is now the entire data of the entity). Therefore PUT is idempotent.
120 |
121 | Ref: https://stackoverflow.com/questions/28459418/use-of-put-vs-patch-methods-in-rest-api-real-life-scenarios
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
--------------------------------------------------------------------------------
/misc/owasp.md:
--------------------------------------------------------------------------------
1 | Common Java Vulnerabilities
2 | ============
3 |
4 |
5 |
6 | Injection Flaws
7 | ------
8 | Injection flaws, particularly SQL injection, are common in Java EE applications. Injection occurs when user-supplied data is sent to an interpreter as part of a command or query. The attacker’s hostile data tricks the interpreter into executing unintended commands or changing data.
9 |
10 |
**Primary Defenses:**
- Option 1: Use of Prepared Statements (with Parameterized Queries)
- Option 2: Use of Stored Procedures
- Option 3: Allow-list Input Validation
- Option 4: Escaping All User-Supplied Input


**Additional Defenses:**
- Enforcing Least Privilege
- Performing Allow-list Input Validation as a Secondary Defense
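
As an example of the first defense, a hedged JDBC sketch (the table and column names are illustrative):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class UserDao {

    // Unsafe: concatenation lets input like "' OR '1'='1" rewrite the query
    // String sql = "SELECT * FROM users WHERE name = '" + name + "'";

    // Safe: the placeholder binds the value as data, never as SQL
    public ResultSet findByName(Connection conn, String name) throws SQLException {
        PreparedStatement ps = conn.prepareStatement(
                "SELECT * FROM users WHERE name = ?");
        ps.setString(1, name);
        return ps.executeQuery();
    }
}
```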
21 |
22 |
23 |
24 | Cross Site Scripting (XSS)
25 | ------------
26 | Cross-Site Scripting (XSS) attacks are a type of injection, in which malicious scripts are injected into otherwise benign and trusted websites. XSS attacks occur when an attacker uses a web application to send malicious code, generally in the form of a browser side script, to a different end user. Flaws that allow these attacks to succeed are quite widespread and occur anywhere a web application uses input from a user within the output it generates without validating or encoding it.
27 |
28 | An attacker can use XSS to send a malicious script to an unsuspecting user. The end user’s browser has no way to know that the script should not be trusted, and will execute the script. Because it thinks the script came from a trusted source, the malicious script can access any cookies, session tokens, or other sensitive information retained by the browser and used with that site. These scripts can even rewrite the content of the HTML page.
29 |
30 | Types of XSS attacks
31 |
32 | There are three main types of XSS attacks. These are:
33 |
34 | **Reflected XSS** : where the malicious script comes from the current HTTP request.
35 | **Stored XSS** : where the malicious script comes from the website's database.
36 | **DOM-based XSS** : where the vulnerability exists in client-side code rather than server-side code.
37 |
38 |
39 | How does XSS work?
40 | ------------
41 | Cross-site scripting works by manipulating a vulnerable web site so that it returns malicious JavaScript to users. When the malicious code executes inside a victim's browser, the attacker can fully compromise their interaction with the application.
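
To make this concrete, here is a hedged servlet-style sketch (assuming `request` is an `HttpServletRequest` and `out` is the response writer; the OWASP Java Encoder library provides `Encode.forHtml`):

```java
import org.owasp.encoder.Encode; // OWASP Java Encoder library

// Vulnerable: user input is reflected into the page unencoded (reflected XSS)
// out.println("<h1>Hello " + request.getParameter("name") + "</h1>");

// Safer: HTML-encode untrusted input before writing it into the output
String name = request.getParameter("name");
out.println("<h1>Hello " + Encode.forHtml(name) + "</h1>");
```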
42 |
43 |
44 |
45 |
46 |
Cross Site Request Forgery (CSRF)
------

Cross-Site Request Forgery (CSRF) is an attack that forces an authenticated user to submit unwanted requests to a web application in which they are currently authenticated. Because the browser automatically attaches the session cookies, the server cannot tell the forged request from a legitimate one. Common defenses include anti-CSRF tokens and the `SameSite` cookie attribute.

52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 | For more information:
71 | 1. [SQL Injection Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html)
72 | 2. [Cross Site Scripting Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html)
73 | 3. [Cross-site scripting](https://portswigger.net/web-security/cross-site-scripting)
74 |
75 |
76 |
77 |
78 |
79 |
--------------------------------------------------------------------------------
/mongo-db/mongo-db.md:
--------------------------------------------------------------------------------
1 | MongoDB
2 | =================
3 |
4 | Wikipedia describes as
5 | > MongoDB is a source-available cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with optional schemas. MongoDB is developed by MongoDB Inc. and licensed under the Server Side Public License (SSPL).
6 |
7 | - MongoDB is an open-source NoSQL database written in C++ language. It uses JSON-like documents with optional schemas.
8 | - It provides easy scalability and is a cross-platform, document-oriented database.
9 | - MongoDB works on the concept of Collection and Document.
10 | - It combines the ability to scale out with features such as secondary indexes, range queries, sorting, aggregations, and geospatial indexes.
11 | - MongoDB is developed by MongoDB Inc. and licensed under the Server Side Public License (SSPL).
12 |
13 |
14 | MongoDB is a document-based database created in 2007 by 10gen software. It works on the concept of collections and documents. A MongoDB server can contain multiple databases and offers high performance along with redundancy and easy scalability. Collection in MongoDB can be considered as a group of documents that can have different types of fields. The document is a set of key-value pairs having a dynamic schema i.e. common fields may hold different types of data and all documents in the same collection need not have the same structure. If you are new to MongoDB but have worked with RDBMS like SQL before, this small table will help you correlate the terminologies:
15 |
16 | | RDBMS | MongoDB |
17 | | ----------- | ----------- |
18 | | Table | Collection |
19 | | Row | Document |
20 | | Column | Field |
21 | | Table join | Embedded documents |
22 |
23 | Key Components
24 | --------------
25 | 1. **_id**: The _id field represents a unique value in the MongoDB document. The _id field is like the document's primary key. If you create a new document without an _id field, MongoDB will automatically create the field.
2. **Collection**: This is a grouping of MongoDB documents. A collection is the equivalent of a table created in any other RDBMS such as Oracle.
27 | 3. **Cursor**: This is a pointer to the result set of a query. Clients can iterate through a cursor to retrieve results.
4. **Database**: This is a container for collections, just as in an RDBMS a database is a container for tables. Each database gets its own set of files on the file system. A MongoDB server can store multiple databases.
29 | 5. **Document**: A record in a MongoDB collection is basically called a document. The document, in turn, will consist of field name and values.
30 | 6. **Field**: A name-value pair in a document. A document has zero or more fields. Fields are analogous to columns in relational databases.
31 |
32 | Document
33 | ----------
34 | A Document in MongoDB is an ordered set of keys with associated values. It is represented by a map, hash, or dictionary. In JavaScript, documents are represented as objects:
35 | ```json
36 | {"greeting" : "Hello world!"}
37 | ```
38 | Complex documents will contain multiple key/value pairs:
39 | ```json
40 | {"greeting" : "Hello world!", "views" : 3}
41 | ```
42 |
43 | Collection
44 | ----------
45 |
46 | A collection in MongoDB is a group of documents. If a document is the MongoDB analog of a row in a relational database, then a collection can be thought of as the analog to a table.
Documents within a single collection can have any number of different "shapes", i.e. collections have dynamic schemas.
48 |
49 | For example, both documents could be stored in a single collection:
50 | ```json
51 | {"greeting" : "Hello world!", "views": 3}
52 | {"signoff": "Good bye"}
53 | ```
54 |
55 | Namespace
56 | ----------
57 |
MongoDB stores BSON (Binary JSON) objects in the collection. The concatenation of the database name and collection name is called a namespace.
59 |
60 |
61 |
62 | Databases in MongoDB
63 | ----------
64 | MongoDB groups collections into databases. MongoDB can host several databases, each grouping together collections.
65 | Some reserved database names are as follows:
66 | ```
67 | admin
68 | local
69 | config
70 | ```
71 |
72 | Mongo Shell
73 | ----------
It is a JavaScript shell that allows interaction with a MongoDB instance from the command line. With it, one can perform administrative functions, inspect a running instance, or explore MongoDB.
75 |
76 | To start the shell, run the mongo executable:
77 | ```shell
78 | $ mongod
79 | $ mongo
80 | MongoDB shell version: 4.2.0
81 | connecting to: test
82 | >
83 | ```
84 |
85 | The shell is a full-featured JavaScript interpreter, capable of running arbitrary JavaScript programs. Let’s see how basic math works on this:
86 | ```shell
87 | > x = 100;
88 | 200
89 | > x / 5;
90 | 20
91 | ```
92 |
93 | Advantages of MongoDB:
94 | --------------
95 |
96 | Some advantages of MongoDB are as follows:
97 |
98 | - Cross-platform
99 | - Written in C++.
100 | - Secure and scalable
101 | - Schema-less database
102 | - Document-oriented
103 | - Supports high-availability and redundancy
104 | - MongoDB supports field, range-based, string pattern matching type queries. for searching the data in the database
105 | - MongoDB support primary and secondary index on any fields
106 | - MongoDB basically uses JavaScript objects in place of procedures
107 | - MongoDB uses a dynamic database schema
108 | - MongoDB is very easy to scale up or down
109 | - MongoDB has inbuilt support for data partitioning (Sharding).
110 |
111 |
112 | Features of MongoDB
113 | --------------
114 |
115 | Feature | Description |
116 | | ----------- | ----------- |
| Indexing | Indexes are created in order to improve search performance. |
118 | | Replication | MongoDB distributes the data across different machines. |
119 | | Ad-hoc Queries | It supports ad-hoc queries by indexing the BSON documents & using a unique query language. |
| Schemaless | It is very flexible because collections are schema-less. |
121 | | Sharding | MongoDB uses sharding to enable deployments with very large data sets and high throughput operations. |
122 |
123 | Indexes in MongoDB
124 | --------------
MongoDB supports the following types of indexes for running queries:
126 | 1. Single Field Index
127 | 2. Compound Index
128 | 3. Multikey Index
129 | 4. Geospatial Index
130 | 5. Text Index
131 | 6. Hashed Index
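
With the MongoDB Java driver, some of the index types above can be created like this (a hedged sketch; the collection and field names are illustrative):

```java
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Indexes;
import org.bson.Document;

public class IndexDemo {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> articles =
                    client.getDatabase("test").getCollection("articles");

            articles.createIndex(Indexes.ascending("title"));                 // single field
            articles.createIndex(Indexes.ascending("author", "publishedAt")); // compound
            articles.createIndex(Indexes.text("body"));                       // text
            articles.createIndex(Indexes.hashed("userId"));                   // hashed
        }
    }
}
```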
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 | For more information:
141 | 1. [MongoDB Interview Questions and Answers](https://hackr.io/blog/mongodb-interview-questions)
142 | 2. [MongoDB Interview Questions](https://www.interviewbit.com/mongodb-interview-questions/)
143 | 3. [MongoDB Interview Questions For Beginners And Professionals In 2020](https://www.edureka.co/blog/mongodb-interview-questions-for-beginners-and-professionals)
--------------------------------------------------------------------------------
/oop/images/Association vs. Aggregation vs. Composition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/Association vs. Aggregation vs. Composition.png
--------------------------------------------------------------------------------
/oop/images/Association.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/Association.png
--------------------------------------------------------------------------------
/oop/images/Composition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/Composition.png
--------------------------------------------------------------------------------
/oop/images/aggregation between a professor and department.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/aggregation between a professor and department.png
--------------------------------------------------------------------------------
/oop/images/default-methods-error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/default-methods-error.png
--------------------------------------------------------------------------------
/oop/images/two-way association.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/oop/images/two-way association.png
--------------------------------------------------------------------------------
/react/react-main.md:
--------------------------------------------------------------------------------
1 | React
2 | =====
--------------------------------------------------------------------------------
/spring/Spring-Transaction-Management.md:
--------------------------------------------------------------------------------
1 | Spring Transaction Management:
2 | =================
3 | > A transaction is a logical unit of work that either completely succeeds or fails. Think about a banking transaction. Here, the unit of work is money debiting from Account A and money crediting to Account B. If one of them fails, the entire process fails. We call it a rollback of all the steps in the transaction if anything fails in between.
4 |
5 |
6 | Global Transactions
7 | --------------------
There can be applications (though rarely) where a transaction spans different databases. This is called distributed transaction processing. The transaction manager cannot sit within the application to handle it; rather, it sits at the application server level. JTA (the Java Transaction API) is required, with the support of JNDI to look up the different databases, and the transaction manager decides the commit or rollback of the distributed transaction. This is a complex process and requires knowledge at the application server level.
9 |
10 | Local Transactions
11 | --------------------
Local transactions happen between the application and a single RDBMS, such as over a simple JDBC connection. With local transactions, all the transaction management code lives within our application code.
13 |
In both global and local transactions, we have to manage the transaction ourselves. If I am using JDBC, I use the JDBC transaction management API; if I am using Hibernate, the Hibernate transaction API; and for global transactions, JTA at the application server.
15 |
16 |
The Spring framework overcomes all of these problems by providing an abstraction over the different transaction APIs and a consistent programming model. The abstraction is the org.springframework.transaction.PlatformTransactionManager interface. Here is a snippet of the interface:
18 |
19 | ```java
20 | public interface PlatformTransactionManager {
21 | TransactionStatus getTransaction(TransactionDefinition definition) throws TransactionException;
22 |
23 | void commit(TransactionStatus status) throws TransactionException;
24 |
25 | void rollback(TransactionStatus status) throws TransactionException;
26 | }
27 | ```
28 | There are various spring managed transaction managers that implement PlatformTransactionManager. Some of them are:
29 |
30 | - **org.springframework.orm.jpa.JpaTransactionManager** — For JPA transactions
31 | - **org.springframework.jdbc.datasource.DataSourceTransactionManager** — For JDBC transactions
32 | - **org.springframework.orm.hibernate5.HibernateTransactionManager** — For Hibernate transactions and it binds with SessionFactory
33 | - **org.springframework.transaction.jta.JtaTransactionManager** — For JTA transactions.
34 | - **org.springframework.transaction.jta.WebLogicJtaTransactionManager** — For Oracle Weblogic managed transaction
35 | - **org.springframework.transaction.jta.WebSphereUowTransactionManager** — For IBM Websphere Application Server managed transactions.
36 | - **org.springframework.jms.connection.JmsTransactionManager** — For JMS messaging transaction by binding JMS connection factory.
37 |
38 | Spring transactions can be managed by 2 approaches: programmatic and declarative.
39 |
40 | Programmatic Approach:
41 | --------------------
42 | Spring provides a programmatic approach in 2 ways :
43 |
44 | 1. Using the TransactionTemplate
45 | 2. Using a `PlatformTransactionManager` implementation directly
46 |
47 | The programmatic approach is not widely used, as the transaction management sits with the business logic. In an application where we have transactions for a few CRUD operations, the programmatic approach is preferred as transaction proxies can be a heavy operation.
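
A minimal sketch of the TransactionTemplate approach (the transfer logic is hypothetical):

```java
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.support.TransactionTemplate;

public class TransferService {

    private final TransactionTemplate txTemplate;

    public TransferService(PlatformTransactionManager txManager) {
        this.txTemplate = new TransactionTemplate(txManager);
    }

    public void transfer() {
        txTemplate.execute(status -> {
            // Runs inside a transaction; an unchecked exception here causes
            // an automatic rollback, otherwise the template commits
            // debit(accountA); credit(accountB); // hypothetical steps
            return null;
        });
    }
}
```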
48 |
49 |
50 | Declarative Approach (@Transactional)
51 | --------------------
The declarative approach is widely used because transaction management stays out of the business logic. It uses AOP proxies behind the scenes to drive transactions around method invocations with the appropriate TransactionManager. It can be done either with annotations or with XML, but nowadays most applications are annotation based, so I am covering how it works with annotations.
53 |
54 |
- **1. Use `@EnableTransactionManagement`** at the top of the configuration class, which has the `@Configuration` annotation. This is the same as the XML tag:
56 |
```xml
<tx:annotation-driven/>
```
60 |
61 | ```java
62 | @Configuration
@EnableTransactionManagement
64 | public class SpringConfiguration{
65 | ...........
66 | ...........
67 | }
68 | ```
69 |
70 | - **2. Define the datasource and transaction manager**
71 |
72 | ```java
@Configuration
@EnableTransactionManagement
public class AppConfig {

    @Bean
    public FooRepository fooRepository() {
        // configure and return a class having @Transactional methods
        return new JdbcFooRepository(dataSource());
    }

    @Bean
    public DataSource dataSource() {
        // configure and return the necessary JDBC DataSource
    }

    @Bean
    public PlatformTransactionManager txManager() {
        return new DataSourceTransactionManager(dataSource());
    }
}
89 | ```
90 |
- **3. Use the @Transactional annotation** above the methods and concrete classes. If applied at the class level, all the methods will be transactional by default.
92 |
93 | Let's try to understand how the annotation works with a simple example:
94 |
Assume we have a sample service class:
96 |
97 | ```java
class SampleService {
99 | @Transactional
100 | public void serviceMethod(){
101 | //call to dao layer
102 | }
103 | }
104 | ```
105 |
When SampleService is injected into another class, Spring will internally inject it in the following manner:
107 |
108 | ```java
class ProxySampleService extends SampleService {
    private SampleService sampleService;

    public ProxySampleService(SampleService s) {
        this.sampleService = s;
    }

    @Override
    public void serviceMethod() {
        try {
            // open transaction
            sampleService.serviceMethod();
            // commit transaction
        } catch (Exception e) {
            // rollback
        }
    }
}
128 | ```
129 |
130 | This is the proxy design that works behind the scenes.
131 |
Now let's see how we can fine-tune the @Transactional annotation by changing its attribute settings.
133 |
134 | Settings of the attributes in @Transactional annotation:
135 |
136 | propagation
137 | -----------
Optional setting for propagation. This is a very important attribute for setting the transactional behavior. I will cover a use case of it below.
- **REQUIRED** — support a current transaction, create a new one if none exists
- **REQUIRES_NEW** — create a new transaction, suspending the current transaction if one exists
- **MANDATORY** — support a current transaction, throw an exception if none exists
- **NESTED** — execute within a nested transaction if a current transaction exists
- **SUPPORTS** — support a current transaction, execute non-transactionally if none exists
144 |
145 | isolation
146 | -----------
The transaction isolation level. It decides the degree to which the transaction is isolated from other transactions.
- **DEFAULT** — the default isolation level of the datasource
- **READ_COMMITTED** — indicates that dirty reads are prevented; non-repeatable and phantom reads can occur
- **READ_UNCOMMITTED** — indicates that dirty reads, non-repeatable reads, and phantom reads can occur
- **REPEATABLE_READ** — indicates that dirty reads and non-repeatable reads are prevented, but phantom reads can occur
- **SERIALIZABLE** — indicates that dirty reads, phantom reads, and non-repeatable reads are prevented
153 |
154 |
We also have other settings:
156 |
- **readOnly** — whether the transaction is read-only or read/write
158 | - **timeout** — transaction timeout
159 | - **rollbackFor** — arrays of exception class objects that must cause a rollback of the transaction
160 | - **rollbackForClassName** — arrays of exception class names that must cause a rollback of the transaction
161 | - **noRollbackFor** — arrays of exception class objects that must not cause a rollback of the transaction
162 | - **noRollbackForClassName** — arrays of exception class names that must not cause a rollback of the transaction
163 |
For example, several of these attributes can be combined on a single method (a hedged sketch; `BusinessException` is a hypothetical application exception):

```java
@Transactional(
        propagation = Propagation.REQUIRED,
        isolation = Isolation.READ_COMMITTED,
        timeout = 30,
        rollbackFor = BusinessException.class)
public void transferMoney(String fromAccount, String toAccount, BigDecimal amount) {
    // debit and credit execute in one transaction;
    // a BusinessException rolls the whole transaction back
}
```
167 |
168 | For more information:
169 |
170 | 1. [Spring Transactional Management](https://dzone.com/articles/bountyspring-transactional-management)
--------------------------------------------------------------------------------
/spring/annotations.md:
--------------------------------------------------------------------------------
1 | Annotations
2 | ==========
3 |
4 | @Autowired
5 | ----------
6 |
7 | Since version 2.5, Spring provides the @Autowired annotation to discover the beans automatically and inject collaborating beans (other associated dependent beans) into our bean.
8 |
By declaring all the beans in the Spring configuration file, the Spring container can autowire relationships between collaborating beans.
10 |
11 | After enabling annotation based injection, now we can use @Autowired annotation. @Autowired can be used on following injection points.
12 |
13 | 1. Constructors
14 | 2. Methods
15 | 3. Fields and
16 | 4. Parameters
17 |
18 | and dependencies can be injected using by type OR by name OR by @Qualifier.
19 |
20 |
21 | In spring there are two types of autowiring. Those are
- **Autowiring by type** : @Autowired by type uses the class type to select the bean; the bean is autowired based on the type of the variable.
- **Autowiring by name** : For autowiring by name, the name of the variable is used for the dependency injection. The name of the autowired variable should be the same as the name of the class or the bean name configured in the @Component annotation.
24 |
25 | For example,
26 |
27 | ```java
28 | public interface Shape {
29 | public void draw();
30 | }
31 |
32 | @Component
33 | public class Rectangle implements Shape {
34 | @Override
35 | public void draw() {
36 | System.out.println(">>>>>>>>>>>>>INVOKING THE RECTANGLE INSTANCE<<<<<<<<<<<<<<<<");
37 | }
38 | }
39 |
40 | @Component
41 | public class Circle implements Shape{
42 | @Override
43 | public void draw() {
44 | System.out.println(">>>>>>>>>>>>>INVOKING THE CIRCLE INSTANCE<<<<<<<<<<<<<<<<");
45 | }
46 | }
47 |
48 | ```
The Shape interface is implemented by two classes, Circle and Rectangle, so both are Shapes. Rectangle and Circle are made Spring beans using the @Component annotation. Now let's see how to autowire these beans in another class.
50 |
51 | ```java
52 | @Component
53 | public class ShapeService {
54 | @Autowired
55 | private Shape rectangle;//by name
56 |
57 | @Autowired
58 | private Rectangle myRectangle;//by type
59 |
60 | }
61 | ```
62 |
Here in the ShapeService class, the Rectangle shape is autowired in two ways:
1. In the first @Autowired, the variable rectangle is autowired based on the name of the variable. When Spring checks the type of the variable, it sees Shape, but there are two Shape implementations: Rectangle and Circle. So Spring cannot decide by type alone which component to autowire. It then checks the name of the variable (rectangle) and looks for a Shape component with the same name. The Rectangle component is available, so Spring injects the property with the rectangle component.
2. In the second @Autowired, the type of the property is Rectangle, so Spring directly injects the Rectangle component into the property myRectangle.
66 |
67 |
68 | @Inject vs @Autowired
69 | ---------------------
70 |
| Key | @Inject |@Autowired |
|-----------------|-----------------------------------|-----------------|
| Basic | It is part of Java CDI (Contexts and Dependency Injection) | It is part of the Spring framework |
| Required | It has no required attribute | It has a required attribute |
| Default Scope | Default scope of the injected beans is prototype | Default scope of the autowired beans is singleton |
| Ambiguity | In case of ambiguity in beans for injection, the @Named qualifier should be added in your code. | In case of ambiguity in beans for injection, the @Qualifier annotation should be added in your code. |
| Advantage | It is a part of Java CDI, so it is not dependent on any DI framework. It makes your system loosely coupled. | It makes your application tightly coupled with the Spring framework. If in the future you want to move to another DI framework, you will need to reconfigure your application. |
78 |
79 |
80 | Component Scanning
81 | ---------------------
82 | To do dependency injection, Spring creates a so-called application context.
83 |
84 | During startup, Spring instantiates objects and adds them to the application context. Objects in the application context are called “Spring beans” or “components”.
85 |
86 | Spring resolves dependencies between Spring beans and injects Spring beans into other Spring beans’ fields or constructors.
87 |
88 | The process of searching the classpath for classes that should contribute to the application context is called component scanning.
89 |
90 | When developing Spring Boot applications, you need to tell the Spring Framework where to look for Spring components. Using component scan is one method of asking Spring to detect Spring managed components. Spring needs the information to locate and register all the Spring components with the application context when the application starts.
91 |
92 | Spring can auto scan, detect, and instantiate components from pre-defined project packages. It can auto scan all classes annotated with the stereotype annotations @Component @Controller, @Service and @Repository
93 |
94 |
95 | @ComponentScan
96 | ---------------
97 | @ComponentScan tells Spring in which packages you have annotated classes which should be managed by Spring. So, for example, if you have a class annotated with @Controller which is in a package which is not scanned by Spring, you will not be able to use it as Spring controller.
98 |
Classes annotated with @Configuration represent a new way of configuring Spring using annotations instead of XML files (it's called Java configuration). Spring needs to know which packages contain Spring beans; otherwise, you would have to register each bean individually. That's what @ComponentScan is used for.
100 |
101 |
102 | @ComponentScan Without Arguments
103 | ---------------
104 | we use the @ComponentScan annotation along with the @Configuration annotation to specify the packages that we want to be scanned. @ComponentScan without arguments tells Spring to scan the current package and all of its sub-packages.
105 | ```java
106 | @Configuration
107 | @ComponentScan
108 | public class DemoAppConfig {
109 | //...
110 | }
111 | ```
112 |
113 | @ComponentScan With Arguments
114 | ---------------
115 | ```java
116 | @Configuration
117 | @ComponentScan(basePackages = {"basic.ioc.autowire", "basic.ioc.setter"})
118 | public class AutowireBeanConfig {
119 | //other configs
120 | }
121 | ```
122 |
123 | @ComponentScan with Exclusions
124 | ---------------
125 |
Use a filter, with a pattern for the classes to exclude:
127 |
128 | ```java
129 | @Configuration
130 | @ComponentScan(basePackages = "com.demo",
131 | includeFilters = @Filter(type = FilterType.REGEX, pattern = ".*Dao"),
132 | excludeFilters = @Filter(Repository.class))
133 | public class AppConfig {
134 | ...
135 | }
136 | ```
137 |
138 | @ComponentScan in a Spring-Boot application
139 | ---------------
In a Spring Boot application, we don’t need to specify the @ComponentScan annotation unless we want more control over the classpath scanning. This is because @SpringBootApplication is already a combination of the three annotations listed below.
141 |
142 | - @Configuration
143 | - @EnableAutoConfiguration
144 | - @ComponentScan
145 |
146 |
147 |
148 | Difference between @Component, @Repository & @Service annotations?
149 | ---------------
150 |
151 | From [Spring Documentation](https://docs.spring.io/spring-framework/docs/current/reference/html/core.html#beans-stereotype-annotations):
152 |
153 | Spring provides stereotype annotations: @Component, @Service, and @Controller. @Component is a generic stereotype for any Spring-managed component. @Repository, @Service, and @Controller are specializations of @Component for more specific use cases (in the persistence, service, and presentation layers, respectively). Therefore, you can annotate your component classes with @Component, but, by annotating them with @Repository, @Service, or @Controller instead, your classes are more properly suited for processing by tools or associating with aspects.
154 |
155 | @Repository
156 | -----------
157 | stereotype for persistence layer
158 |
159 | @Repository’s job is to catch persistence-specific exceptions and re-throw them as one of Spring’s unified unchecked exceptions.
160 |
161 | For this, Spring provides `PersistenceExceptionTranslationPostProcessor`, which we are required to add in our application context (already included if we're using Spring Boot):
```java
@Bean
public PersistenceExceptionTranslationPostProcessor exceptionTranslation() {
    return new PersistenceExceptionTranslationPostProcessor();
}
```
165 | This bean post processor adds an advisor to any bean that’s annotated with @Repository.
166 |
167 | @Service
168 | ---------
169 | stereotype for service layer
170 |
171 | We mark beans with @Service to indicate that they're holding the business logic. Besides being used in the service layer, there isn't any other special use for this annotation.
172 |
173 | @Controller
174 | -----------
175 | stereotype for presentation layer (spring-mvc)
176 |
177 | Instead of using @Component on a controller class in Spring MVC, we use @Controller, which is more readable and appropriate.
178 |
By using that annotation we do two things: first, we declare that this class is a Spring bean and should be created and maintained by the Spring ApplicationContext; second, we indicate that it's a controller in the MVC setup. This latter property is used by web-specific tools and functionalities.
180 |
181 | For example, DispatcherServlet will look for @RequestMapping on classes that are annotated using @Controller but not with @Component.
182 |
This means @Component and @Controller are the same with respect to bean creation and dependency injection, but the latter is a specialized form of the former. Even if you replace the @Controller annotation with @Component, Spring can still automatically detect and register the controller class, but it may not work as you expect with respect to request mapping.
184 |
185 |
186 |
187 | For more information:
188 |
189 | 1. [Spring – @Autowired](https://javabydeveloper.com/tutorial-on-spring-autowired/)
190 | 2. [Core Spring Framework Annotations](https://medium.com/javarevisited/core-spring-framework-annotations-300493ba85da)
191 | 3. [Spring Component Scanning](https://www.baeldung.com/spring-component-scanning)
192 | 4. [Classpath Scanning using @ComponentScan and Filters](https://jstobigdata.com/spring/classpath-scanning-using-componentscan-and-filters/)
--------------------------------------------------------------------------------
/spring/http-Verbs.md:
--------------------------------------------------------------------------------
1 | HTTP Verbs
2 | ==========
3 |
4 |
5 | GET
6 | ----
7 | The GET method is designed to request a specific resource. In essence, it literally "gets" the resource in question, and is pretty limited to just that action. GET requests should only retrieve data, leaving other methods to perform the other transformative actions.
8 |
9 | The HTTP GET method is used to `read` (or retrieve) a representation of a resource. In the "happy" (or non-error) path, GET returns a representation in XML or JSON and an HTTP response code of 200 (OK). In an error case, it most often returns a 404 (NOT FOUND) or 400 (BAD REQUEST).
10 |
11 | According to the design of the HTTP specification, GET (along with HEAD) requests are used only to read data and not change it. Therefore, when used this way, they are considered safe. That is, they can be called without risk of data modification or corruption—calling it once has the same effect as calling it 10 times, or none at all. Additionally, GET (and HEAD) is idempotent, which means that making multiple identical requests ends up having the same result as a single request.
12 |
13 | Do not expose unsafe operations via GET—it should never modify any resources on the server.
14 |
15 | **Examples:**
16 | - GET http://www.example.com/customers/12345
17 | - GET http://www.example.com/customers/12345/orders
18 | - GET http://www.example.com/buckets/sample
19 |
20 | POST
21 | ----
22 | POST affects the related resources attached to a targeted resource. POST allows an API to submit an attribute or entity to a given resource; in practice, this means the targeted resource receives a subordinate resource that is part of a larger collection
23 |
24 | The POST verb is most-often utilized to `create` new resources. In particular, it's used to create subordinate resources. That is, subordinate to some other (e.g. parent) resource. In other words, when creating a new resource, POST to the parent and the service takes care of associating the new resource with the parent, assigning an ID (new resource URI), etc.
25 |
26 | On successful creation, return HTTP status 201, returning a Location header with a link to the newly-created resource with the 201 HTTP status.
27 |
POST is neither safe nor idempotent. It is therefore recommended for non-idempotent resource requests. Making two identical POST requests will most likely result in two resources containing the same information.
29 |
30 | **Examples:**
31 | - POST http://www.example.com/customers
32 | - POST http://www.example.com/customers/12345/orders
33 |
34 | PUT
35 | ----
36 | PUT is somewhat the polar opposite of GET. While GET requests a specific resource, PUT places that resource in the remote directory. It should be noted that PUT assumes either the resource does not exist or the resource is fine to be overwritten – when using PUT, all representations of the target resource will be replaced by the payload.
37 |
38 | PUT is most-often utilized for `update` capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource.
39 |
40 | However, PUT can also be used to create a resource in the case where the resource ID is chosen by the client instead of by the server. In other words, if the PUT is to a URI that contains the value of a non-existent resource ID. Again, the request body contains a resource representation. Many feel this is convoluted and confusing. Consequently, this method of creation should be used sparingly, if at all.
41 |
42 | Alternatively, use POST to create new resources and provide the client-defined ID in the body representation—presumably to a URI that doesn't include the ID of the resource (see POST below).
43 |
44 | On successful update, return 200 (or 204 if not returning any content in the body) from a PUT. If using PUT for create, return HTTP status 201 on successful creation. A body in the response is optional—providing one consumes more bandwidth. It is not necessary to return a link via a Location header in the creation case since the client already set the resource ID.
45 |
46 | PUT is not a safe operation, in that it modifies (or creates) state on the server, but it is idempotent. In other words, if you create or update a resource using PUT and then make that same call again, the resource is still there and still has the same state as it did with the first call.
47 |
48 | If, for instance, calling PUT on a resource increments a counter within the resource, the call is no longer idempotent. Sometimes that happens and it may be enough to document that the call is not idempotent. However, it's recommended to keep PUT requests idempotent. It is strongly recommended to use POST for non-idempotent requests.
49 |
50 | **Examples:**
51 | - PUT http://www.example.com/customers/12345
52 | - PUT http://www.example.com/customers/12345/orders/98765
53 | - PUT http://www.example.com/buckets/secret_stuff
54 |
55 | DELETE
56 | ----
57 | DELETE is the most clear-cut method on this list because it does exactly what’s on the tin – DELETE deletes a targeted resource. The typical response for a deletion method is simply to reply with an "OK" status – either the resource was deleted, or it was not.
58 |
59 | DELETE is pretty easy to understand. It is used to `delete` a resource identified by a URI.
60 |
61 | On successful deletion, return HTTP status 200 (OK) along with a response body, perhaps the representation of the deleted item (often demands too much bandwidth), or a wrapped response (see Return Values below). Either that or return HTTP status 204 (NO CONTENT) with no response body. In other words, a 204 status with no body, or the JSEND-style response and HTTP status 200 are the recommended responses.
62 |
63 | HTTP-spec-wise, DELETE operations are idempotent. If you DELETE a resource, it's removed. Repeatedly calling DELETE on that resource ends up the same: the resource is gone. If calling DELETE say, decrements a counter (within the resource), the DELETE call is no longer idempotent. As mentioned previously, usage statistics and measurements may be updated while still considering the service idempotent as long as no resource data is changed. Using POST for non-idempotent resource requests is recommended.
64 |
65 | There is a caveat about DELETE idempotence, however. Calling DELETE on a resource a second time will often return a 404 (NOT FOUND) since it was already removed and therefore is no longer findable. This, by some opinions, makes DELETE operations no longer idempotent, however, the end-state of the resource is the same. Returning a 404 is acceptable and communicates accurately the status of the call.
66 |
67 | **Examples:**
68 | - DELETE http://www.example.com/customers/12345
69 | - DELETE http://www.example.com/customers/12345/orders
70 | - DELETE http://www.example.com/bucket/sample
71 |
72 | PATCH
73 | ----
74 | PATCH is designed to partially modify a targeted resource. In other words, while PUT places a resource in the target service, PATCH modifies that resource, as opposed to replacing it. This is a good way to update files or versions.
75 |
76 | PATCH is used for `modify` capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource.
77 |
78 | This resembles PUT, but the body contains a set of instructions describing how a resource currently residing on the server should be modified to produce a new version. This means that the PATCH body should not just be a modified part of the resource, but should be expressed in some kind of patch language, such as JSON Patch or XML Patch.
79 |
80 | PATCH is neither safe nor idempotent. However, a PATCH request can be issued in such a way as to be idempotent, which also helps prevent bad outcomes from collisions between two PATCH requests on the same resource in a similar time frame. Collisions from multiple PATCH requests may be more dangerous than PUT collisions because some patch formats need to operate from a known base-point or else they will corrupt the resource. Clients using this kind of patch application should use a conditional request such that the request will fail if the resource has been updated since the client last accessed the resource. For example, the client can use a strong ETag in an If-Match header on the PATCH request.
81 |
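82 | As a concrete illustration of that advice, here is a minimal sketch using Java's built-in `java.net.http.HttpClient`; the URL, the ETag round-trip, and the JSON Patch payload are illustrative assumptions:
83 |
84 | ```java
85 | import java.net.URI;
86 | import java.net.http.HttpClient;
87 | import java.net.http.HttpRequest;
88 | import java.net.http.HttpResponse;
89 |
90 | public class ConditionalPatchDemo {
91 |     public static void main(String[] args) throws Exception {
92 |         HttpClient client = HttpClient.newHttpClient();
93 |         URI customer = URI.create("http://www.example.com/customers/12345");
94 |
95 |         // 1. GET the resource; the server returns its current ETag.
96 |         HttpResponse<String> current = client.send(
97 |                 HttpRequest.newBuilder(customer).GET().build(),
98 |                 HttpResponse.BodyHandlers.ofString());
99 |         String etag = current.headers().firstValue("ETag").orElseThrow();
100 |
101 |         // 2. PATCH with a JSON Patch (RFC 6902) body, guarded by If-Match.
102 |         //    If another client changed the resource in the meantime, the
103 |         //    server answers 412 (Precondition Failed) instead of applying
104 |         //    the patch against a stale base version.
105 |         String jsonPatch = "[{\"op\":\"replace\",\"path\":\"/status\",\"value\":\"inactive\"}]";
106 |         HttpRequest patch = HttpRequest.newBuilder(customer)
107 |                 .header("Content-Type", "application/json-patch+json")
108 |                 .header("If-Match", etag)
109 |                 .method("PATCH", HttpRequest.BodyPublishers.ofString(jsonPatch))
110 |                 .build();
111 |
112 |         System.out.println(client.send(patch, HttpResponse.BodyHandlers.ofString()).statusCode());
113 |     }
114 | }
115 | ```
116 |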
82 | **Examples:**
83 | - PATCH http://www.example.com/customers/12345
84 | - PATCH http://www.example.com/customers/12345/orders/98765
85 | - PATCH http://www.example.com/buckets/secret_stuff
86 |
87 | OPTIONS
88 | ----
89 | The OPTIONS method describes the communication options available for the target resource; it does not modify the resource itself. A client can use it to discover which HTTP methods a resource supports, which the server typically advertises in the Allow response header. Browsers also issue OPTIONS requests automatically as CORS preflight checks before certain cross-origin calls.
90 |
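91 | A client can use OPTIONS to probe a resource before calling it. The sketch below (Java's built-in `java.net.http.HttpClient`, illustrative URL) simply prints whatever the server advertises in the Allow header:
92 |
93 | ```java
94 | import java.net.URI;
95 | import java.net.http.HttpClient;
96 | import java.net.http.HttpRequest;
97 | import java.net.http.HttpResponse;
98 |
99 | public class OptionsDemo {
100 |     public static void main(String[] args) throws Exception {
101 |         HttpClient client = HttpClient.newHttpClient();
102 |
103 |         HttpRequest options = HttpRequest.newBuilder()
104 |                 .uri(URI.create("http://www.example.com/customers/12345"))
105 |                 .method("OPTIONS", HttpRequest.BodyPublishers.noBody())
106 |                 .build();
107 |
108 |         HttpResponse<Void> response = client.send(options, HttpResponse.BodyHandlers.discarding());
109 |
110 |         // A well-behaved server lists the supported methods, e.g.
111 |         // "Allow: GET, PUT, DELETE, OPTIONS".
112 |         System.out.println(response.headers().firstValue("Allow").orElse("(no Allow header)"));
113 |     }
114 | }
115 | ```
116 |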
91 | HEAD
92 | ----
93 | HEAD mirrors GET: it returns exactly the headers that a GET for the same resource would return, but with the response body omitted. While that may seem overly simplistic, it lets a client check whether a resource exists, how large it is, or when it last changed (via headers such as Content-Length and Last-Modified) without paying the cost of transferring the body, which is extremely helpful for testing and troubleshooting.
94 |
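95 | For example, a minimal sketch (again Java's built-in `java.net.http.HttpClient`, illustrative URL) that inspects a resource's size and freshness without downloading it:
96 |
97 | ```java
98 | import java.net.URI;
99 | import java.net.http.HttpClient;
100 | import java.net.http.HttpRequest;
101 | import java.net.http.HttpResponse;
102 |
103 | public class HeadDemo {
104 |     public static void main(String[] args) throws Exception {
105 |         HttpClient client = HttpClient.newHttpClient();
106 |
107 |         // HEAD returns the same headers a GET would, but no body.
108 |         HttpRequest head = HttpRequest.newBuilder()
109 |                 .uri(URI.create("http://www.example.com/buckets/sample"))
110 |                 .method("HEAD", HttpRequest.BodyPublishers.noBody())
111 |                 .build();
112 |
113 |         HttpResponse<Void> response = client.send(head, HttpResponse.BodyHandlers.discarding());
114 |         System.out.println("Status:         " + response.statusCode());
115 |         System.out.println("Content-Length: " + response.headers().firstValue("Content-Length").orElse("?"));
116 |         System.out.println("Last-Modified:  " + response.headers().firstValue("Last-Modified").orElse("?"));
117 |     }
118 | }
119 | ```
120 |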
95 | HTTP Status Codes
96 | ----
97 |
98 |
99 | 1×× Informational
100 | ----
101 |
102 | - 100 Continue
103 | - 101 Switching Protocols
104 | - 102 Processing
105 |
106 | 2×× Success
107 | ----
108 |
109 | - 200 OK
110 | - 201 Created
111 | - 202 Accepted
112 | - 203 Non-authoritative Information
113 | - 204 No Content
114 | - 205 Reset Content
115 | - 206 Partial Content
116 | - 207 Multi-Status
117 | - 208 Already Reported
118 | - 226 IM Used
119 |
120 | 3×× Redirection
121 | ----
122 |
123 | - 300 Multiple Choices
124 | - 301 Moved Permanently
125 | - 302 Found
126 | - 303 See Other
127 | - 304 Not Modified
128 | - 305 Use Proxy
129 | - 307 Temporary Redirect
130 | - 308 Permanent Redirect
131 |
132 | 4×× Client Error
133 | ----
134 |
135 | - 400 Bad Request
136 | - 401 Unauthorized
137 | - 402 Payment Required
138 | - 403 Forbidden
139 | - 404 Not Found
140 | - 405 Method Not Allowed
141 | - 406 Not Acceptable
142 | - 407 Proxy Authentication Required
143 | - 408 Request Timeout
144 | - 409 Conflict
145 | - 410 Gone
146 | - 411 Length Required
147 | - 412 Precondition Failed
148 | - 413 Payload Too Large
149 | - 414 Request-URI Too Long
150 | - 415 Unsupported Media Type
151 | - 416 Requested Range Not Satisfiable
152 | - 417 Expectation Failed
153 | - 418 I'm a teapot
154 | - 421 Misdirected Request
155 | - 422 Unprocessable Entity
156 | - 423 Locked
157 | - 424 Failed Dependency
158 | - 426 Upgrade Required
159 | - 428 Precondition Required
160 | - 429 Too Many Requests
161 | - 431 Request Header Fields Too Large
162 | - 444 Connection Closed Without Response
163 | - 451 Unavailable For Legal Reasons
164 | - 499 Client Closed Request
165 |
166 | 5×× Server Error
167 | ----
168 |
169 | - 500 Internal Server Error
170 | - 501 Not Implemented
171 | - 502 Bad Gateway
172 | - 503 Service Unavailable
173 | - 504 Gateway Timeout
174 | - 505 HTTP Version Not Supported
175 | - 506 Variant Also Negotiates
176 | - 507 Insufficient Storage
177 | - 508 Loop Detected
178 | - 510 Not Extended
179 | - 511 Network Authentication Required
180 | - 599 Network Connect Timeout Error
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
--------------------------------------------------------------------------------
/spring/images/spring-mvc-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunilsoni/interviewnotes/caf1fa63490f903cb16fe61df1c441ca5b9f7563/spring/images/spring-mvc-flow.png
--------------------------------------------------------------------------------
/spring/spring-aop.md:
--------------------------------------------------------------------------------
1 | # Spring AOP
2 |
3 |
4 |
5 |
6 | https://www.tutorialspoint.com/springaop/springaop_core_concepts.htm
7 |
8 | https://dzone.com/articles/spring-aop-tutorial-with-examples
9 |
--------------------------------------------------------------------------------
/spring/swagger.md:
--------------------------------------------------------------------------------
1 | Swagger
2 | =======
3 |
4 | Swagger is a popular open-source toolset for documenting REST-based APIs, such as those provided by microservices. It generates easy-to-use, interactive documentation.
5 |
6 | By adding Swagger annotations to REST endpoints, API documentation can be auto-generated and exposed over a web interface. Internal and external teams can use this interface to see the list of APIs, their inputs, and their error codes; they can even invoke the endpoints directly from the web interface to see live results.
7 |
8 | Swagger UI is a powerful tool that helps the consumers of your microservices understand the set of endpoints a given microservice provides.
9 |
10 | Integrate Swagger into your microservices
11 | -----------
12 | Integrating Swagger into a Spring Boot application is straightforward: add the Swagger dependencies to `build.gradle`, provide a Swagger configuration class, and finally tweak the `WebMvcConfig` so that the swagger-ui resources are served by your project.
13 |
14 | **build.gradle - add swagger dependencies.**
15 | ```groovy
16 | dependencies {
17 |     compile('org.springframework.cloud:spring-cloud-starter-config')
18 |     // https://mvnrepository.com/artifact/io.springfox/springfox-swagger2
19 |     compile group: 'io.springfox', name: 'springfox-swagger2', version: '2.8.0'
20 |     compile group: 'io.springfox', name: 'springfox-swagger-ui', version: '2.8.0'
21 | }
22 | ```
22 |
23 | **Second step is to define swagger configuration:**
24 | SwaggerConfig.java.
25 | ```java
26 | import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
27 | import org.springframework.context.annotation.*;
28 | import springfox.documentation.builders.*;
29 | import springfox.documentation.service.*;
30 | import springfox.documentation.spi.DocumentationType;
31 | import springfox.documentation.spring.web.plugins.Docket;
32 | import springfox.documentation.swagger2.annotations.EnableSwagger2;
30 |
31 | @Configuration
32 | @EnableSwagger2
33 | @EnableAutoConfiguration
34 | public class SwaggerConfig {
35 | @Bean
36 | public Docket productApi() {
37 | return new Docket(DocumentationType.SWAGGER_2)
38 | .groupName("Product Service")
39 | .apiInfo(apiInfo())
40 | .select()
41 | .apis(RequestHandlerSelectors.basePackage("hello"))
42 | .paths(PathSelectors.any())
43 | .build();
44 | }
45 | private ApiInfo apiInfo() {
46 | return new ApiInfoBuilder()
47 | .title("Product Service with Swagger")
48 | .description("Spring REST Sample with Swagger")
49 | .termsOfServiceUrl("http://www-03.ibm.com/software/sla/sladb.nsf/sla/bm?Open")
50 | .contact(new Contact("Munish Chandel", "","munish.chandel@outlook.com"))
51 | .license("Apache License Version 2.0")
52 | .licenseUrl("https://github.com/IBM-Bluemix/news-aggregator/blob/master/LICENSE")
53 | .version("1.0")
54 | .build();
55 | }
56 | }
57 |
58 | ```
59 | **Lastly, add the below WebMvcConfig to enable swagger UI**
60 |
61 | ```java
62 | import org.slf4j.Logger;
63 | import org.slf4j.LoggerFactory;
64 | import org.springframework.context.annotation.Configuration;
65 | import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
66 | import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
67 |
68 | @Configuration
69 | public class WebMvcConfig extends WebMvcConfigurerAdapter {
70 | private static final Logger logger = LoggerFactory.getLogger(WebMvcConfig.class);
71 | @Override
72 | public void addResourceHandlers(ResourceHandlerRegistry registry) {
73 | super.addResourceHandlers(registry);
74 |         // Serve the swagger-ui page and its webjar assets bundled in the springfox jars
75 |         registry.addResourceHandler("swagger-ui.html")
76 |                 .addResourceLocations("classpath:/META-INF/resources/");
77 |         registry.addResourceHandler("/webjars/**")
78 |                 .addResourceLocations("classpath:/META-INF/resources/webjars/");
78 | }
79 | }
80 |
81 | ```
82 | Swagger is now configured for use in your application. Once the application starts, the interactive documentation is typically served at `http://localhost:8080/swagger-ui.html` (adjust host and port to your setup).
83 |
84 |
85 |
86 | Swagger annotations
87 | -----------
88 | The resource class can then carry annotations such as the following (a usage sketch appears after the list):
89 |
90 | - `@Api`: To mark a resource as a Swagger resource
91 | - `@ApiOperation`: Describes an operation or typically an HTTP method against a specific path
92 | - `@ApiResponse`: To describe the response of a method
93 | - `@ApiParam`: Additional metadata for operational parameters of a method
94 |
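95 | Here is a sketch of how these annotations sit on a Spring REST controller; the controller, paths, and `Product` class are illustrative assumptions, not from a real project:
96 |
97 | ```java
98 | import io.swagger.annotations.Api;
99 | import io.swagger.annotations.ApiOperation;
100 | import io.swagger.annotations.ApiParam;
101 | import io.swagger.annotations.ApiResponse;
102 | import io.swagger.annotations.ApiResponses;
103 | import org.springframework.web.bind.annotation.GetMapping;
104 | import org.springframework.web.bind.annotation.PathVariable;
105 | import org.springframework.web.bind.annotation.RequestMapping;
106 | import org.springframework.web.bind.annotation.RestController;
107 |
108 | @Api(tags = "Products") // marks this controller as a Swagger resource
109 | @RestController
110 | @RequestMapping("/products")
111 | public class ProductController {
112 |
113 |     @ApiOperation("Fetch a product by its id")
114 |     @ApiResponses({
115 |             @ApiResponse(code = 200, message = "Product found"),
116 |             @ApiResponse(code = 404, message = "No product with that id")
117 |     })
118 |     @GetMapping("/{id}")
119 |     public Product byId(@ApiParam(value = "Product identifier", required = true)
120 |                         @PathVariable long id) {
121 |         return new Product(id, "sample"); // look-up logic omitted
122 |     }
123 |
124 |     // Illustrative payload class; public fields keep the sketch short
125 |     public static class Product {
126 |         public final long id;
127 |         public final String name;
128 |
129 |         Product(long id, String name) {
130 |             this.id = id;
131 |             this.name = name;
132 |         }
133 |     }
134 | }
135 | ```
136 |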
95 | Maven plugin
96 | -----------
97 | A Maven plugin can be used to generate the swagger.yaml file based on the metadata placed on the code:
98 | ```xml
99 | <build>
100 |     <plugins>
101 |         ...
102 |         <plugin>
103 |             <groupId>com.github.kongchen</groupId>
104 |             <artifactId>swagger-maven-plugin</artifactId>
105 |             <version>3.1.5</version>
106 |             <configuration>
107 |                 <apiSources>
108 |                     <apiSource>
109 |                         <springmvc>false</springmvc>
110 |                         <locations>org.jee8ng.users.boundary</locations>
111 |                         <schemes>http</schemes>
112 |                         <host>localhost:8081</host>
113 |                         <basePath>/${project.build.finalName}/resources</basePath>
114 |                         <info>
115 |                             <title>Users API</title>
116 |                             <version>v1</version>
117 |                             <description>Users rest endpoints</description>
118 |                         </info>
119 |                         <outputFormats>yaml</outputFormats>
120 |                         <swaggerDirectory>${basedir}/src/main/webapp</swaggerDirectory>
121 |                     </apiSource>
122 |                 </apiSources>
123 |             </configuration>
124 |             <executions>
125 |                 <execution>
126 |                     <phase>compile</phase>
127 |                     <goals>
128 |                         <goal>generate</goal>
129 |                     </goals>
130 |                 </execution>
131 |             </executions>
132 |         </plugin>
133 |         ...
134 |     </plugins>
135 | </build>
136 | ```
137 |
138 | The `swaggerDirectory` is where the swagger.yaml file gets generated. In this way, a combination of the plugin and annotations can produce the Swagger spec in the desired output format (YAML here; JSON is also supported). The plugin and API details can be explored further on the Swagger website and on the GitHub pages of the plugin.
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
--------------------------------------------------------------------------------
/unix/Unix.md:
--------------------------------------------------------------------------------
1 | Unix
2 | =====
3 |
4 | Table of Contents
5 | --------------------
6 |
7 | * [Find](#find)
8 | * [Search](#search)
9 | * [Processes](#processes)
10 | * [File permissions](#file-permissions)
11 | * [List files](#list-files)
12 | * [Symlinks](#symlinks)
13 | * [Checking logs](#checking-logs)
14 | * [Text manipulation](#text-manipulation)
15 | * [Sed](#sed)
16 | * [Networking](#networking)
17 | * [Space](#space)
18 | * [Miscellaneous](#miscellaneous)
19 |
20 |
21 | Find
22 | --------------------
23 |
24 | - `find dir -type f` Find all files in the directory
25 | - `find dir -type d` Find all directories within this directory
26 | - `find dir -name "a*.mp3" -print0` Find all files/directories with names matching a*.mp3
27 | - `find dir -type f -name "a*.txt"` Find all files (recursively) with names matching a*.txt
28 | - `find dir -type f -print0 | xargs -0 grep -l "word"` List file names with matching word
29 | - `find dir -type f -print0 | xargs -0 mv -t targetdir` Move all files from dir tree into a single directory (targetdir)
30 |
31 | Search
32 | --------------------
33 |
34 | - `grep "word" file` All matching lines in the file
35 | - `grep -i "word" file` All matching lines - Ignore case
36 | - `grep -r "word" dir` All matching lines in (recursively) all files or directory
37 | - `grep -c "word" file` Count of matching lines
38 | - `grep "word" file | wc -l` Count of matching lines
39 | - `grep -v "word" file | wc -l` Count of non-matching lines
40 | - `grep -o -w "word" file | wc -w` Word count
41 | - `grep -l -r "word" dir` List file names with matching word
42 | - `find dir -type f -print0 | xargs -0 grep -l "word"` List file names with matching word
43 |
44 | Processes
45 | --------------------
46 |
47 | - `nohup program.sh > mylog.txt 2>&1 &`
48 | + `&` starts process as child process in the background
49 | + `nohup` Ctrl-C or terminating session sends kill signal to all child processes, `nohup` catches and ignores it
50 | + `>` redirect output (from left/source to right/target)
51 | + `2` error stream
52 | + `1` output stream
53 | + `2>&1` redirect the error stream into the output stream (so errors land in mylog.txt too)
54 | - `top` Check CPU and memory consumption
55 | - `kill -9 processId` kill the process with that pid
56 | - `kill -3 processId` for a JVM process, will write thread dump to output stream
57 | - `ps -ef | grep word` find process with matching word
58 | - `echo $?` print exit status of the last command
59 |
60 | File permissions
61 | --------------------
62 |
63 | - `chmod mode file` Change file permissions (modes as below)
64 | - `chmod +x file` Add execute permission
65 | - `chmod +w file` Add write permission
66 | - `chmod +r file` Add read permission
67 | - `chmod 755 file` Change permission
68 | - `chmod -R 755 dir` Change permission recursively
69 | - `chmod g+w file` Add write permission at group level
70 | - Numbers: 1=x, 2=w, 3=w+x, 4=r, 5=r+x, 6=r+w, 7=r+w+x
71 | - References: u=user, g=group, o=others, a=all
72 |
73 | List files
74 | --------------------
75 |
76 | - `ls` list files in this directory
77 | - `ls -ltr` list files in this directory with more details (`-l`), sorted by modification time (`-t`) in reverse (`-r`), i.e. oldest first
78 | - `ls -ltra` same as above + list hidden files (names starting with `.`)
79 | - `ls -ltr | grep "^l"` Lists all symlinks (`ls -ltr` lists symlinks with `l` as first character of output)
80 |
81 | Symlinks
82 | --------------------
83 |
84 | - Symbolic or soft links. Pointer to a file.
85 | - `ln -s /java/jdk7 jdk` create symlink jdk which points to `/java/jdk7`
86 | - `ln -ns /java/jdk8 jdk` update symlink jdk to point to `/java/jdk8`
87 |
88 |
89 | Checking logs
90 | --------------------
91 |
92 | - `tail -f logfile` output appended data as file grows (f=follow)
93 | - `less +F logfile` open file, and output appended data as file grows
94 |
95 | Text manipulation
96 | --------------------
97 |
98 | - `head file` display first 10 lines
99 | - `head -20 file` display first 20 lines
100 | - `tail file` display last 10 lines
101 | - `tail -5 file` display last 5 lines
102 | - `head -20 file | tail -1` display 20th line
103 | - `cut -f3 file` display 3rd field for each line, tab as separator
104 | - `cut -d',' -f3 file` display 3rd field for each line, `,` as separator
105 | - `cut -c3-6 file` display characters from 3rd to 6th position for each line
106 | - `cut -c7- file` display characters from 7th to end of each line
107 |
108 | Sed
109 | --------------------
110 |
111 | - `sed 's/unix/linux/' file` replace first occurrence of "unix" with "linux" on each line
112 | - `sed 's/unix/linux/3' file` replace 3rd occurrence on each line
113 | - `sed 's/unix/linux/g' file` replace all occurrences
114 | - `sed 's/unix/linux/3g' file` replace all occurrences starting from the 3rd one on each line
115 | - `sed -i '1 d' file` delete first line (header) of file
116 | - `sed -i '$ d' file` delete last line (footer) of file
117 | - `sed -n '10 p' file` print 10th line of the file
118 |
119 | Networking
120 | --------------------
121 |
122 | - `ping hostname` or `telnet hostname port` check if remote host is alive
123 | - `scp /dir/file user@hostname:/targetdir/file` copy file to different host
124 | - `netstat -a | grep "port"` check host connected to machine's port
125 |
126 | Space
127 | --------------------
128 |
129 | - `df -h` space in current drive (`-h` is human readable format)
130 | - `du -h .` sizes of all files and directories under the current directory
131 | - `du -sh .` total size of the current directory (`-s` = summary)
132 | - `du /bin/* | sort -n` sort all files based on size (asc)
133 |
134 |
135 | Miscellaneous
136 | --------------------
137 |
138 | - `lsof file` list processes using the file
139 | - `rev file` reverses the text in each line
140 | - `echo "string" | rev ` reverses the string
141 | - `sort file` sorts file lines alphabetically
142 | - `sort -n file` sorts file lines numerically
143 | - `uniq file` filter out adjacent duplicate lines and print
144 | - `uniq -c file` print with count at the beginning of each line of output
145 | - `uniq -d file` print only duplicates
146 |
147 |
148 | For more information:
149 |
150 | - [Art of Command Line](https://github.com/jlevy/the-art-of-command-line#processing-files-and-data)
--------------------------------------------------------------------------------