
[

{ "backgroundImage" : "images/sorting/bubblepass.png",
"description" : "Bubble sort is a simple and well-known sorting algorithm. I
t is used in practice once in a blue moon and its main application is to make an
introduction to the sorting algorithms. Bubble sort belongs to O(n2) sorting al
gorithms, which makes it quite inefficient for sorting large data volumes. Bubbl
e sort is stable and adaptive. \n\nAlgorithm:\n\n1. Compare each pair of adjacen
t elements from the beginning of an array and, if they are in reversed order, sw
ap them.\n2. If at least one swap has been done, repeat step 1.\n\nYou can imagine that on every step big bubbles float to the surface and stay there. At the step, when no bubble moves, sorting stops",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "static void bubble_sort_generic<T>( T[] array ) where T :
IComparable\n{\n
long right_border = array.Length - 1;\n
do\n
{\n
long last_exchange = 0;\n
for ( long i = 0; i < right_border; i++ )\n
{\n
if ( array[i].CompareTo(array[i + 1]) > 0 )\n
{\
n
T temp = array[i];\n
array[i] = array[i + 1];\n
array[i + 1] = temp;\n
last_exchange = i;\n
}\n
}\n
right_border = last_exchange;\n
}\n
while ( rig
ht_border > 0 );\n}",
"template<typename IT> \nvoid bubble_sort(IT begin, IT end)\n{\n
bool done(false);\n
while (begin != end && !done) {\n
done = true;
\n
IT it(begin);\n
IT next(begin);\n
++next;\n
while
(next != end) {\n
if (*next < *it) {\n
using ::std::s
wap;\n
swap(*next, *it);\n
done = false;\n
}\n
++it;\n
++next;\n
}\n
--end;\n
}\
n}",
"public void sort(E[] array) {\n
boolean sorted = false;\n
for (int i = 0; (i < array.length) && !sorted; i++) {\n
sorted = true;\n
for (int j = array.length - 1; j > i; j--) {\n
if (array[j].c
ompareTo(array[j - 1]) < 0) {\n
sorted = false;\n
swap(array, j, j - 1);\n
}\n
}\n
}\n}\n\nprivate static <E>
void swap(E[] array, int current_pos, int new_position) {\n
E temp = array[c
urrent_pos];\n
array[current_pos] = array[new_position];\n
array[new_posit
ion] = temp;\n}"
],
"key" : 1,
"order" : "O(n2)",
"shortTitle" : "Bubble Sort",
"tileImage" : "images/sorting/bubblepass.png",
"title" : "Bubble Sort"
},
{ "backgroundImage" : "images/sorting/insertion1.gif",
"description" : "Insertion sort belongs to the O(n2) sorting algorithms. Unl
ike many sorting algorithms with quadratic complexity, it is actually applied in
practice for sorting small arrays of data. For instance, it is used to improve
quicksort routine. Some sources notice, that people use same algorithm ordering
items, for example, hand of cards.\n\nAlgorithm:\nInsertion sort algorithm somew
hat resembles selection sort. Array is imaginary divided into two parts - sorted
one and unsorted one. At the beginning, sorted part contains first element of t
he array and unsorted one contains the rest. At every step, algorithm takes firs
t element in the unsorted part and inserts it to the right place of the sorted o
ne. When unsorted part becomes empty, algorithm stops\n\nThe ideas of insertion:

\nThe main operation of the algorithm is insertion. The task is to insert a valu
e into the sorted part of the array. Let us see the variants of how we can do it
.\n\n\"Sifting down\" using swaps:\nThe simplest way to insert next element into
the sorted part is to sift it down, until it occupies correct position. Initial
ly the element stays right after the sorted part. At each step algorithm compare
s the element with one before it and, if they stay in reversed order, swap them.
\n\nInsertion sort properties:\nadaptive (performance adapts to the initial orde
r of elements)\nstable (insertion sort retains relative order of the same elemen
ts)\nin-place (requires constant amount of additional space)\nonline (new elemen
ts can be added during the sort).",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "static void InsertSort<T>(IList<T> list) where T : ICompar
able<T>\n{\n
int i, j;\n\n
for (i = 1; i < list.Count; i++)\n
{\n
T value = list[i];\n
j = i - 1;\n
while ((j >= 0) && (list[j].Co
mpareTo(value) > 0))\n
{\n
list[j + 1] = list[j];\n
j--;\n
}\n
list[j + 1] = value;\n
}\n}",
"void insertionSort(int arr[], int length) {\n
int i, j, tm
p;\n
for (i = 1; i < length; i++) {\n
j = i;\n
while
(j > 0 && arr[j - 1] > arr[j]) {\n
tmp = arr[j];\n
arr[j] = arr[j - 1];\n
arr[j - 1] = tmp;\n
j--;\n
}\n
}\n}",
"public void sort(E[] array) {\n
for (int i = 1; i < array.le
ngth; i++) {\n
for (int j = i; (j > 0) && (array[j].compareTo(array[j - 1
]) < 0); j--) {\n
swap(array, j, j - 1);\n
}\n
}\n}\n\npriv
ate static <E> void swap(E[] array, int current_pos, int new_position) {\n
E
temp = array[current_pos];\n
array[current_pos] = array[new_position];\n
a
rray[new_position] = temp;\n}"
],
"key" : 2,
"order" : "O(n2)",
"shortTitle" : "Insertion Sort",
"tileImage" : "images/sorting/insertion1.gif",
"title" : "Insertion Sort"
},
{ "backgroundImage" : "images/sorting/selection_2.jpg",
"description" : "Selection sort is one of the O(n2) sorting algorithms, whic
h makes it quite inefficient for sorting large data volumes. Selection sort is n
otable for its programming simplicity and it can over perform other sorts in cer
tain situations (see complexity analysis for more details).\n\nAlgorithm:\nThe i
dea of algorithm is quite simple. Array is imaginary divided into two parts - so
rted one and unsorted one. At the beginning, sorted part is empty, while unsorte
d one contains whole array. At every step, algorithm finds minimal element in th
e unsorted part and adds it to the end of the sorted one. When unsorted part bec
omes empty, algorithm stops.\nWhen algorithm sorts an array, it swaps first elem
ent of unsorted part with minimal element and then it is included to the sorted
part. This implementation of selection sort in not stable. In case of linked lis
t is sorted, and, instead of swaps, minimal element is linked to the unsorted pa
rt, selection sort is stable.\n\nSelection sort stops, when unsorted part become
s empty. As we know, on every step number of unsorted elements decreased by one.
Therefore, selection sort makes n steps (n is number of elements in array) of o
uter loop, before stop. Every step of outer loop requires finding minimum in uns
orted part. Summing up, n + (n - 1) + (n - 2) + ... + 1, results in O(n2) number
of comparisons. Number of swaps may vary from zero (in case of sorted array) to

n - 1 (in case array was sorted in reversed order), which results in O(n) numbe
r of swaps. Overall algorithm complexity is O(n2).\n\nFact, that selection sort
requires n - 1 number of swaps at most, makes it very efficient in situations, w
hen write operation is significantly more expensive, than read operation.",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "static void selectionSortGeneric<T>(T[] a) where T : IComp
arable\n{\n
for(int sortedSize=0; sortedSize < a.Length; sortedSize++)\n
{
\n
int minElementIndex = sortedSize;\n
T minElementValue = a[sorte
dSize];\n
for(int i=sortedSize+1; i<a.Length; i++) \n
{\n
if (a[i].CompareTo(minElementValue) < 0)\n
{\n
minE
lementIndex = i;\n
minElementValue = a[i];\n
}\n
}\n
a[minElementIndex] = a[sortedSize];\n
a[sortedSize] = minEle
mentValue;\n
}\n}",
"void selectionSort(int arr[], int n) {\n
int i, j, minInde
x, tmp;
\n
for (i = 0; i < n - 1; i++) {\n
minIndex = i;\n
for (j = i + 1; j < n; j++)\n
if (arr[j] < arr[minInde
x])\n
minIndex = j;\n
if (minIndex != i) {\n
tmp = arr[i];\n
arr[i] = arr[minIndex];\n
arr[minIndex] = tmp;\n
}\n
}\n}",
"public void sort(E[] array) {
\n
for (int i = 0; i < arra
y.length - 1; i++) {\n
int min_index = i;\n
for (int j = array.len
gth - 1; j > i; j--) {\n
if (array[j].compareTo(array[min_index]) < 0
) {\n
min_index = j;\n
}\n
}\n
swap(arra
y, i, min_index);\n
}\n}\n\npublic static <E> void swap(E[] array, int curren
t_pos, int new_position) {\n
E temp = array[current_pos];\n
array[current_
pos] = array[new_position];\n
array[new_position] = temp;\n}"
],
"key" : 3,
"order" : "O(n2)",
"shortTitle" : "Selection Sort",
"tileImage" : "images/sorting/selection_2.jpg",
"title" : "Selection Sort"
},
{ "backgroundImage" : "images/sorting/merge1.jpg",
"description" : "Merge sort is based on the divide-and-conquer paradigm. Its
worst-case running time has a lower order of growth than insertion sort. Since
we are dealing with subproblems, we state each subproblem as sorting a subarray
A[p .. r]. Initially, p = 1 and r = n, but these values change as we recurse thr
ough subproblems.\nTo sort A[p .. r]:\n\n1. Divide Step:\nIf a given array A has
zero or one element, simply return; it is already sorted. Otherwise, split A[p
.. r] into two subarrays A[p .. q] and A[q + 1 .. r], each containing about half
of the elements of A[p .. r]. That is, q is the halfway point of A[p .. r].\n\n
2. Conquer Step:\nConquer by recursively sorting the two subarrays A[p .. q] and
A[q + 1 .. r].\n\n3. Combine Step:\nCombine the elements back in A[p .. r] by m
erging the two sorted subarrays A[p .. q] and A[q + 1 .. r] into a sorted sequen
ce. To accomplish this step, we will define a procedure MERGE (A, p, q, r).\n\nN
ote that the recursion bottoms out when the subarray has just one element, so th
at it is trivially sorted.",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",

"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "public IList MergeSort(IList list)\n{\n
if (list.Count
<= 1)\n
return list;\n\n
int mid = list.Count / 2;\n\n
IList left =
new ArrayList();\n
IList right = new ArrayList();\n\n
for (int i = 0; i <
mid; i++)\n
left.Add(list[i]);\n\n
for (int i = mid; i < list.Count;
i++)\n
right.Add(list[i]);\n\n
return Merge(MergeSort(left), MergeSort
(right));\n}\n\npublic IList Merge(IList left, IList right) \n{\n
IList rv =
new ArrayList();\n
\n
while (left.Count > 0 && right.Count > 0)\n
i
f (((IComparable)left[0]).CompareTo(right[0]) > 0)\n
{\n
rv.Ad
d(right[0]);\n
right.RemoveAt(0);\n
}\n
else\n
{
\n
rv.Add(left[0]);\n
left.RemoveAt(0);\n
}\n\n
for (int i = 0; i < left.Count; i++)\n
rv.Add(left[i]);\n\n
for (int i
= 0; i < right.Count; i++)\n
rv.Add(right[i]);\n\n
return rv;\n}",
"vector<int> merge_sort(vector<int>& vec)\n{\n
// Termination
condition: List is completely sorted if it\n
// only contains a single eleme
nt.\n
if(vec.size() == 1)\n
{\n
return vec;\n
}\n\n
// Determ
ine the location of the middle element in the vector\n
std::vector<int>::iter
ator middle = vec.begin() + (vec.size() / 2);\n\n
vector<int> left(vec.begin(
), middle);\n
vector<int> right(middle, vec.end());\n\n
// Perform a merge
sort on the two smaller vectors\n
left = merge_sort(left);\n
right = merg
e_sort(right);\n\n
return merge(left, right);\n}",
"public void sort(E[] array) {\n
int left = 0;\n
int right
= array.length - 1;\n
mergeSort(array, this.temp, left, right);\n}\n\nprivat
e void mergeSort(E[] array, E[] temp, int left, int right) {\n
int mid = (lef
t + right) / 2;\n
if (left == right) {\n
return;\n
}\n
mergeSort
(array, temp, left, mid);\n
mergeSort(array, temp, mid + 1, right);\n
for
(int i = left; i <= right; i++) {\n
comparisons++;\n
temp[i] = arr
ay[i];\n
}\n
int pos1 = left;\n
int pos2 = mid + 1;\n
for (int j = l
eft; j <= right; j++) {\n
comparisons++;\n
if (pos1 == mid + 1) {\
n
array[j] = temp[pos2++];\n
swaps++;\n
} else if (
pos2 > right) {\n
array[j] = temp[pos1++];\n
swaps++;\n
} else if (temp[pos1].compareTo(temp[pos2]) < 0) {\n
array[j] =
temp[pos1++];\n
swaps++;\n
} else {\n
array[j] = te
mp[pos2++];\n
swaps++;\n
}\n
}\n}"
],
"key" : 4,
"order" : "O(n log n)",
"shortTitle" : "Merge Sort",
"tileImage" : "images/sorting/merge1.jpg",
"title" : "Merge Sort"
},
{ "backgroundImage" : "images/sorting/quick1.gif",
"description" : "Quicksort is a fast sorting algorithm, which is used not on
ly for educational purposes, but widely applied in practice. On the average, it
has O(n log n) complexity, making quicksort suitable for sorting big data volume
s. The idea of the algorithm is quite simple and once you realize it, you can wr
ite quicksort as fast as bubble sort.\n\nAlgorithm:\nThe divide-and-conquer stra
tegy is used in quicksort. Below the recursion step is described:\n1.Choose a pi
vot value: We take the value of the middle element as pivot value, but it can be
any value, which is in range of sorted values, even if it doesn't present in th
e array.\n2.Partition: Rearrange elements in such a way, that all elements which
are lesser than the pivot go to the left part of the array and all elements gre
ater than the pivot, go to the right part of the array. Values equal to the pivo
t can stay in any part of the array. Notice, that array may be divided in non-eq
ual parts.\n3.Sort both parts: Apply quicksort algorithm recursively to the left
and the right parts.\n\nPartition algorithm in detail:\nThere are two indices i
and j and at the very beginning of the partition algorithm i points to the firs
t element in the array and j points to the last one. Then algorithm moves i forw

ard, until an element with value greater or equal to the pivot is found. Index j
is moved backward, until an element with value lesser or equal to the pivot is
found. If i <= j then they are swapped and i steps to the next position (i + 1), j
steps to the previous one (j - 1). Algorithm stops, when i becomes greater than
j.\n\nAfter partition, all values before i-th element are less or equal than the
pivot and all values after j-th element are greater or equal to the pivot.\n\nW
hy does it work?\nOn the partition step algorithm divides the array into two par
ts and every element a from the left part is less or equal than every element b
from the right part. Also a and b satisfy a <= pivot <= b inequality. After completion
of the recursion calls both of the parts become sorted and, taking into account
arguments stated above, the whole array is sorted.\n\nComplexity analysis:\nOn
the average quicksort has O(n log n) complexity, but strong proof of this fact i
s not trivial and not presented here. Still, you can find the proof in [1]. In w
orst case, quicksort runs O(n2) time, but on the most \"practical\" data it work
s just fine and outperforms other O(n log n) sorting algorithms.",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "class Quicksort {\n
private void swap(int[] Array, int
Left, int Right) {\n
int temp = Array[Right];\n
Ar
ray[Right] = Array[Left];\n
Array[Left] = temp;\n
}\n\n
public void sort(int[] Array, int Left, int Right) {\n
int LHold = Left;
\n
int RHold = Right;\n
Random ObjRan = new Random();\n
int
Pivot = ObjRan.Next(Left,Right);\n
swap(Array,Pivot,Left);\n
Pivot = Left;\n
Left++;\n\n
while (Right >= Left) {\n
i
f (Array[Left] >= Array[Pivot]\n
&& Array[Right] < Array[Pivot])\
n
swap(Array, Left, Right);\n
else if (Array[Left] >=
Array[Pivot])\n
Right--;\n
else if (Array[Right] < Arr
ay[Pivot])\n
Left++;\n
else {\n
                    Right--;\n
Left++;\n
}
}
\n
swap(Array, Pivot, Right
);\n
Pivot = Right;
\n
if (Pivot > LHold)\n
sort(Arr
ay, LHold, Pivot);\n
if (RHold > Pivot+1)\n
sort(Array, Pivo
t+1, RHold);\n
}
\n}",
"#include <functional>\n#include <algorithm>\n#include <iterator
>\n\ntemplate< typename BidirectionalIterator, typename Compare >\nvoid quick_so
rt( BidirectionalIterator first, BidirectionalIterator last, Compare cmp ) {\n
if( first != last ) {\n
BidirectionalIterator left = first;\n
Bidirection
alIterator right = last;\n
BidirectionalIterator pivot = left++;\n\n
while
( left != right ) {\n
if( cmp( *left, *pivot ) ) {\n
++left;\n
} else {\n
while( (left != right) && cmp( *pivot, *right ) )\n
--right;\n
std::iter_swap( left, right );\n
}\n
}\n\n
--lef
t;\n
std::iter_swap( first, left );\n\n
quick_sort( first, left, cmp );\n
quick_sort( right, last, cmp );\n }\n}\n\ntemplate< typename BidirectionalIt
erator >\ninline void quick_sort( BidirectionalIterator first, BidirectionalIter
ator last ) {\n quick_sort( first, last,\n
std::less_equal< typename std::it
erator_traits< BidirectionalIterator >::value_type >()\n );\n}",
"public void sort(E[] array) {\n
comparisons = 0;\n
swaps
= 0;\n
int i = 0;\n
int j = array.length - 1;\n
quickSort(array, i, j);
\n}\n\nprivate void quickSort(E[] array, int i, int j) {\n
int pivotIndex = f
indPivot(i, j);\n
NativeMethods.swap(array, pivotIndex, j);\n
swaps++;\n
int k = partition(array, i - 1, j, array[j]);\n
NativeMethods.swap(array, k
, j);\n
swaps++;\n
if ((k - i) > 1) {\n
quickSort(array, i, k - 1);
\n
}\n
if ((j - k) > 1) {\n
quickSort(array, k + 1, j);\n
}\n}"
],
"key" : 5,

"order" : "O(n log n)",


"shortTitle" : "Quick Sort",
"tileImage" : "images/sorting/quick1.gif",
"title" : "Quick Sort"
},
{ "backgroundImage" : "images/sorting/heap_1.png",
"description" : "Heap Sort is another optimal comparison sort as its time co
mplexity is O(nlgn) in both average and worst cases. Although somewhat slower in
practice on most machines than a good implementation of quicksort, it has the a
dvantages of worst-case O(n log n) runtime. Heapsort is an in-place algorithm an
d is not a stable sort.\nThe main idea behind heapsort is to construct a heap ou
t of the list of elements to be sorted. From this heap the maximum element (in c
ase of max-heap) or the minimum element (in case of min-heap) can be repeatedly
taken out with ease to give the sorted list of elements.\n\nDescription of the I
mplementation Of Heap Sort:\nThe code uses max-heap for sorting an array. It sta
rts with the macro definitions of left and right children of a node i. The rest
of the code consists of 3 functions :\n\n1) max_heapify() : It takes as input th
e index of node i in an array a whose size is also passed. the node i has the pr
operty that both its left and right subtrees are max-heaps. But the tree rooted
at node i may not be so. This function makes this tree a max-heap. It traverses
the tree starting from node i to the leaves. Every time it compares the node wit
h its left and right children and the maximum of all of them takes the position
of the node.\n\n2) build_max-heap() : It is the function for the initial formati
on of the heap when the sorting starts. It calls max-heapify() function for all
non-leaf nodes. This is so because the leaf nodes are trivially max-heaps of siz
e 1. When this function returns, the array becomes a max-heap.\n\n3) heap_sort()
: This function is the main heapsort function. It starts by calling the build_m
ax_heap() function on the array. After this function, the max element of the hea
p is replaced by the last element in the heap and its size is reduced by 1. Then
max_heapify() is called from the root of heap. This is continued till the heap
contains no more elements.\n\nFor example, take the max-heap as\n87 34 67 20 18
56 40\n\nNow 87 will be replaced by 40\n40 34 67 20 18 56 | 87\nafter max_heapif
y()\n67 34 56 20 18 40 | 87\n\nNow 67 will be replaced by 40\n40 34 56 20 18 | 6
7 87\nafter max_heapify()\n56 34 40 20 18 | 67 87\n\nNow 56 will be replaced by
18\n18 34 40 20 | 56 67 87\nafter max_heapify()\n40 34 18 20 | 56 67 87\n\nNow 4
0 will be replaced by 20\n20 34 18 | 40 56 67 87\nafter max_heapify()\n34 20 18
| 40 56 67 87\n\nNow 34 will be replaced by 18\n18 20 | 34 40 56 67 87\nafter ma
x_heapify()\n20 18 | 34 40 56 67 87\n\nNow 20 will be replaced by 18\n18 | 20 34
40 56 67 87\nafter max_heapify()\n18 | 20 34 40 56 67 87\n\nThus the sorted lis
t is 18 20 34 40 56 67 87\n\nComplexity:\nIt is a binary tree, so it takes O(log
n) sifting elements in worst case. And we do this action for n elements, thus t
he final running time is O(n log n)",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "public static void HeapSort(int[] input)\n{\n
//Build-M
ax-Heap\n
int heapSize = input.Length;\n
for (int p = (heapSize - 1) / 2;
p >= 0; p--)\n
MaxHeapify(input, heapSize, p);\n\n
for (int i = input.
Length - 1; i > 0; i--)\n
{\n
//Swap\n
int temp = input[i];\n
input[i] = input[0];\n
input[0] = temp;\n\n
heapSize--;\n
MaxHeapify(input, heapSize, 0);\n
}\n}\n\nprivate static void MaxHeapify
(int[] input, int heapSize, int index)\n{\n
int left = (index + 1) * 2 - 1;\n
int right = (index + 1) * 2;\n
int largest = 0;\n\n
if (left < heapSiz
e && input[left] > input[index])\n
largest = left;\n
else\n
lar
gest = index;\n\n
if (right < heapSize && input[right] > input[largest])\n

largest = right;\n\n
if (largest != index)\n
{\n
int temp = in
put[index];\n
input[index] = input[largest];\n
input[largest] = te
mp;\n\n
MaxHeapify(input, heapSize, largest);\n
}\n}",
"void heapSort(int numbers[], int array_size) {\n int i, temp;\
n \n for (i = (array_size / 2); i >= 0; i--) {\n
siftDown(numbers, i, array_
size - 1);\n }\n \n for (i = array_size-1; i >= 1; i--) {\n
// Swap\n
te
mp = numbers[0];\n
numbers[0] = numbers[i];\n
numbers[i] = temp;\n \n
s
iftDown(numbers, 0, i-1);\n }\n}\n \nvoid siftDown(int numbers[], int root, int
bottom) {\n int maxChild = root * 2 + 1;\n \n // Find the biggest child\n if
(maxChild < bottom) {\n
int otherChild = maxChild + 1;\n
// Reversed for s
tability\n
maxChild = (numbers[otherChild] > numbers[maxChild])?otherChild:ma
xChild;\n } else {\n
// Don't overflow\n
if(maxChild > bottom) return;\n
}\n \n // If we have the correct ordering, we are done.\n if(numbers[root] >=
numbers[maxChild]) return;\n \n // Swap\n int temp = numbers[root];\n number
s[root] = numbers[maxChild];\n numbers[maxChild] = temp;\n \n // Tail queue re
cursion. Will be compiled as a loop with correct compiler switches.\n siftDown(
numbers, maxChild, bottom);\n}",
"public void sort(E[] array) {\n
MyHeap<E> max_heap = new Max
Heap<E>(array, array.length, array.length);\n
for (int i = 0; i < array.lengt
h; i++) {\n
max_heap.removeMax();\n
}\n}\n\n
public void buildHeap(
) {\n
for (int i = num / 2 - 1; i >= 0; i--) {\n
siftdown(i);\n
}\n
}\n
\n
private void siftdown(int pos) {\n
assert (pos >= 0) && (pos < num) : \"Illegal Heap Position\";\n
while (!isLeaf(pos)) {\n
int j = left
Child(pos);\n
if ((j < (num - 1)) && (heap[j].compareTo(heap[j + 1]) < 0)
) {\n
j++;\n
}\n
if (heap[pos].compareTo(heap[j]) >= 0)
{\n
return;\n
}\n            swap(heap, pos, j);\n
pos = j;
\n
}\n}\n
\npublic E removeMax() {\n
assert num > 0 : \"Removing From E
mpty Heap\";\n
swap(heap, 0, --num);\n
if (num != 0) {\n
siftdown(0
);\n
}\n
return heap[num];\n}"
],
"key" : 6,
"order" : "O(n log n)",
"shortTitle" : "Heap Sort",
"tileImage" : "images/sorting/heap_1.png",
"title" : "Heap Sort"
},
{ "backgroundImage" : "images/sorting/counting.gif",
"description" : "Counting sort is a linear time sorting algorithm used to so
rt items when they belong to a fixed and finite set. Integers which lie in a fix
ed interval, say k1 to k2, are examples of such items.\n\nThe algorithm proceeds
by defining an ordering relation between the items from which the set to be sor
ted is derived (for a set of integers, this relation is trivial).Let the set to
be sorted be called A. Then, an auxiliary array with size equal to the number of
items in the superset is defined, say B. For each element in A, say e, the algo
rithm stores the number of items in A smaller than or equal to e in B(e). If the
sorted set is to be stored in an array C, then for each e in A, taken in revers
e order, C[B[e]] = e. After each such step, the value of B(e) is decremented.\n\
nThe algorithm makes two passes over A and one pass over B. If size of the range
k is smaller than size of input n, then time complexity=O(n). Also, note that i
t is a stable algorithm, meaning that ties are resolved by reporting those eleme
nts first which occur first.\n\nThis visual demonstration takes 8 randomly gener
ated single digit numbers as input and sorts them. The range of the inputs are f
rom 0 to 9.\n\nAnalysis:\nBecause the algorithm uses only simple for loops, with
out recursion or subroutine calls, it is straightforward to analyze. The initial
ization of the Count array, and the second for loop which performs a prefix sum
on the count array, each iterate at most k + 1 times and therefore take O(k) tim
e. The other two for loops, and the initialization of the output array, each tak
e O(n) time. Therefore the time for the whole algorithm is the sum of the times
for these steps, O(n + k).\n\nBecause it uses arrays of length k + 1 and n, the
total space usage of the algorithm is also O(n + k).For problem instances in whi

ch the maximum key value is significantly smaller than the number of items, coun
ting sort can be highly space-efficient, as the only storage it uses other than
its input and output arrays is the Count array which uses space O(k)",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time and Stability",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "static void Sort(int[] a, int numValues) {\n
int[] coun
ts = new int[numValues];\n
for(int i=0; i < a.Length; i++) {\n
counts[a[i]
]++;\n
}\n
int outputPos = 0;\n
for (int i=0; i < numValues
; i++) {\n
for (int j=0; j < counts[i]; j++) {\n
a[out
putPos] = i;\n
outputPos++;\n
}\n
}\n}",
"#include <stdio.h>\n#include <stdlib.h>\n\nvoid counting_sort(i
nt a[], unsigned int length, int num_values) {\n
unsigned int i;\n
unsigne
d int j, output_pos;\n
int* counts = calloc(num_values, sizeof(int));\n
i
f (counts == NULL) {\n
printf(\"Out of memory\\n\");\n
exit(-1);\n
}\n
for(i=0; i<length; i++) {\n
counts[a[i]]++;\n
}\n
output_
pos = 0;\n
for (i=0; i<num_values; i++) {\n
for (j=0; j<counts[i]; j++
) {\n
a[output_pos] = i;\n
output_pos++;\n
}\n
}
\n
free(counts);\n}\n",
"public static void sort(int[] a) {\n
if (a.length == 0)\n
return;\n\n
int max = a[0], min = a[0];\n
for (int i = 1;
i < a.length; i++) {\n
if (a[i] > max)\n
max = a[i];\
n
else if (a[i] < min)\n
min = a[i];\n
}\n
int numValues = max - min + 1;\n
int[] counts = new int[numValues];\n
f
or (int i = 0; i < a.length; i++) {\n
counts[a[i]-min]++;\n
}\
n
int outputPos = 0;\n
for (int i = 0; i < numValues; i++) {\n
for (int j = 0; j < counts[i]; j++) {\n
a[outputPos] = i+
min;\n
outputPos++;\n
}\n
}\n}"
],
"key" : 7,
"order" : "O(n)",
"shortTitle" : "Counting Sort",
"tileImage" : "images/sorting/counting.gif",
    "title" : "Counting Sort"
},
{ "backgroundImage" : "images/sorting/bucket.jpg",
"description" : "Bucket sort runs in linear time on the average. It assumes
that the input is generated by a random process that distributes elements unifor
mly over the interval [0, 1).\n\nThe idea of Bucket sort is to divide the interv
al [0, 1) into n equal-sized subintervals, or buckets, and then distribute the n
input numbers into the buckets. Since the inputs are uniformly distributed over
(0, 1), we don't expect many numbers to fall into each bucket. To produce the o
utput, simply sort the numbers in each bucket and then go through the bucket in
order, listing the elements in each.\n\nThe code assumes that input is in n-elem
ent array A and each element in A satisfies 0 <= A[i] <= 1. We also need an auxi
liary array B[0 . . n -1] for linked-lists (buckets).\n\nCode explanation :\n\nA
t first, we define the value of m, which means that all the elements we will int
roduce for the array will have to be lower than m. Next, we make buckets for the
size of m, and we make them null and in the end, we add the elements to the pro
per buckets. It is not required another type of sorting algorithm, in this examp
le we use bucket sort only, as we use a bucket for each element of the array, th
is might seem familiar with radix sort.\nComplexity:\n\nBucket sort can be seen
as a generalization of counting sort; in fact, if each bucket has size 1 then bu
cket sort degenerates to counting sort. The variable bucket size of bucket sort
allows it to use O(n) memory instead of O(M) memory, where M is the number of di

stinct values; in exchange, it gives up counting sort's O(n + M) worst-case beha


vior.\n\nThe time complexity of bucket sort is: O(n + m) where: m is the range i
nput values, n is the total number of values in the array. Bucket sort beats all
other sorting routines in time complexity. It should only be used when the rang
e of input values is small compared with the number of values. In other words, o
ccasions when there are a lot of repeated values in the input. Bucket sort works
by counting the number of instances of each input value throughout the array. I
t then reconstructs the array from this auxiliary data. This implementation has
a configurable input range, and will use the least amount of memory possible.\n\
nAds\n\nAdvantages:\n\n
the user knows the range of the elements;\n
time c
omplexity is good compared to other algorithms.\n\nDisadvantages:\n\n
you are
limited to having to know the greatest element;\n
extra memory is required;\
n\nConclusion:\n\nIf you know the range of the elements, the bucket sort is quit
e good, with a time complexity of only O(n+k). At the same time, this is a major
drawback, since it is a must to know the greatest elements. It is a distributio
n sort and a cousin of radix sort.",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
      "description" : "Set of Algorithms Describes Sorting Process. With Different Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "public IList BucketSort(IList iList)\n{\nIList _iList = iL
ist;\nif (iList == null || iList.Count <= 1) return iList;\nvar maxValue = iList
[0];\nvar minValue = iList[0];\nforeach (var item in iList)\n{\n
var _item =
(IComparable)item;\n
if ((_item).CompareTo(maxValue) > 0) { maxValue = item;
}\n
if ((_item).CompareTo(minValue) < 0) { minValue = item; }\n}\nArrayList[]
bucket = new ArrayList[(int)maxValue - (int)minValue + 1];\nforeach (var item i
n iList)\n{\n
bucket[(int)item - (int)minValue] = new ArrayList();\n}\nforeach (var item in i
List)\n{\n
bucket[(int)item - (int)minValue].Add(item);\n}\niList.Clear();\nf
oreach (var node in bucket)\n{\n
if (node.Count > 0)\n
{\n
foreach
(var element in node) { iList.Add(element); }\n
}\n}\nreturn iList;\n}",
"void bucketSort(int array[], int n) {\n int i, j;\n int count
[n];\n for(i=0; i < n; i++) {\n
count[i] = 0;\n }\n for(i=0; i < n; i++) {
\n
(count[array[i]])++;\n }\n for(i=0,j=0; i < n; i++) {\n
for(; count[i
]>0; (count[i])--) {\n
array[j++] = i;\n
}\n }\n}",
"public static int[] bucketSort(int[] arr) {\n
int i, j;\n
int[] count = new int[arr.length];\n
for (i = 0; i < arr.length; i++) {\n
count[arr[i]]++;\n
}\n
for (i = 0, j = 0; i < count.length; i++) {\n
for (; count[i] > 0; (count[i])--) {\n
arr[j++] = i;\n
}\n
}\n
return arr;\n}"
],
"key" : 8,
"order" : "O(n)",
"shortTitle" : "Bucket Sort",
"tileImage" : "images/sorting/bucket.jpg",
"title" : "Bucket Sort"
},
{ "backgroundImage" : "images/sorting/RadixSort1.jpg",
"description" : "Radix sort is one of the linear sorting algorithms for inte
gers. It functions by sorting the input numbers on each digit, for each of the d
igits in the numbers. However, the process adopted by this sort method is somewh
at counterintuitive, in the sense that the numbers are sorted on the least-signi
ficant digit first, followed by the second-least significant digit and so on til
l the most significant digit.\n\nTo appreciate Radix Sort, consider the followin
g analogy: Suppose that we wish to sort a deck of 52 playing cards (the differen
t suits can be given suitable values, for example 1 for Diamonds, 2 for Clubs, 3
for Hearts and 4 for Spades). The 'natural' thing to do would be to first sort

the cards according to suits, then sort each of the four separate piles, and fin
ally combine the four in order. This approach, however, has an inherent disadvan
tage. When each of the piles is being sorted, the other piles have to be kept as
ide and kept track of. If, instead, we follow the 'counterintuitive' approach of
first sorting the cards by value, this problem is eliminated. After the first st
ep, the four separate piles are combined in order and then sorted by suit. If a
stable sorting algorithm (i.e. one which resolves a tie by keeping the number ob
tained first in the input as the first in the output) is used, it can be easily seen that
correct final results are obtained.\n\nAs has been mentioned, the sorting of nu
mbers proceeds by sorting the least significant to most significant digit. For s
orting each of these digit groups, a stable sorting algorithm is needed. Also, t
he elements in this group to be sorted are in the fixed range of 0 to 9. Both of
these characteristics point towards the use of Counting Sort as the sorting alg
orithm of choice for sorting on each digit (If you haven't read the description
on Counting Sort already, please do so now).\n\nThe time complexity of the algor
ithm is as follows: Suppose that the n input numbers have maximum k digits. Then
the Counting Sort procedure is called a total of k times. Counting Sort is a li
near, or O(n) algorithm. So the entire Radix Sort procedure takes O(kn) time. If
the numbers are of finite size, the algorithm runs in O(n) asymptotic time.\n\n
A graphical representation of the entire procedure has been provided in the atta
ched applet. This program takes 8 randomly generated 3 digit integers as input a
nd sorts them using Radix Sort.\n\nAnalysis\n\nThe running time depends on the stable sort used as an intermediate sorting algorithm. When each digit is in the range 1 to k, and k is not too large, COUNTING_SORT is the obvious choice. In the case of counting sort, each pass over n d-digit numbers takes O(n + k) time. There are d passes, so the total time for Radix sort is O(d(n + k)). When d is constant and k = O(n), the Radix sort runs in linear time.\n",
"group" : { "backgroundImage" : "images/sorting/bubblepass.png",
"description" : "Set of Algorithms Descripes Sorting Process. With Diffe
rent Purpose Sorting and Different Running Time",
"groupImage" : "images/sorting/sort_header.png",
"key" : "Sorting",
"shortTitle" : "Sorting",
"title" : "Sorting"
},
"algorithms" : [ "public void RadixSort(int[] a)\n{ \n
// helper array \
n
int[] t=new int[a.Length]; \n
// number of bits our group will be long \
n
int r=4; // try to set this also to 2, 8 or 16 to see if it is quicker or n
ot\n
// number of bits of a C# int \n
int b=32; \n
// counting and pref
ix arrays\n
// (note dimensions 2^r which is the number of all possible value
s of a r-bit number) \n
int[] count=new int[1<<r]; \n
int[] pref=new int[1
<<r]; \n
// number of groups \n
int groups=(int)Math.Ceiling((double)b/(do
uble)r); \n
// the mask to identify groups \n
int mask = (1<<r)-1; \n \n
// the algorithm: \n
for (int c=0, shift=0; c<groups; c++, shift+=r)\n
{
\n
// reset count array \n
for (int j=0; j<count.Length; j++)\n
count[j]=0;\n \n
// counting elements of the c-th group \n
for (int i=0; i<a.Length; i++)\n
count[(a[i]>>shift)&mask]++; \n \
n
// calculating prefixes \n
pref[0]=0; \n
for (int i=1; i<
count.Length; i++)\n
pref[i]=pref[i-1]+count[i-1];\n
for (int
i=0; i<a.Length; i++)\n
t[pref[(a[i]>>shift)&mask]++]=a[i];\n
t.CopyTo(a,0); \n
}\n}",
"typedef struct slist_ { \n
int val;\n
struct slist_ *next
; \n} slist;\n \nslist *radixsort(slist *L, int t) \n{\n
int i, j, d, m=1;\n
slist *temp, *head[10], *tail[10];\n
for (j=1; j<=t; j++) \n
{\n
for (i=0; i<=9; i++)\n
{\n
head[i] = NULL;\n
tail[
i] = NULL;\n
}\n
while ( L != NULL ) \n
{\n
d =
static_cast<int>(L->val/m)%10;\n
temp = L;\n
L = L->next;\
n
temp->next = NULL;\n
if (head[d]!= NULL)\n
{\
n
tail[d]->next = temp;\n
tail[d] = temp;\n

}\n

else\n
{ // The queue for digit d is empty\n
head[d] = temp; // Point to the new head\n
tail[d]
= temp; // Point to the new tail\n
}\n
}\n
d=0;\n
while (head[d]==NULL)\n
d++;\n
L = head[d];\n
temp =
tail[d];\n
d++;\n
while (d < 10)\n
{\n
if (head
[d]!=NULL)\n
{\n
temp->next = head[d];\n
temp = tail[d];\n
}\n
d++;\n
}\n
m = m * 10;\n    }\n    return L;\n}",
"// LSD radix sort\npublic static void sort(String[] a, int W) {
\n
int N = a.length;\n
int R = 256; // extend ASCII alphabet size\n
S
tring[] aux = new String[N];\n\n
for (int d = W-1; d >= 0; d--) {\n
//
sort by key-indexed counting on dth character\n\n
// compute frequency c
ounts\n
int[] count = new int[R+1];\n
for (int i = 0; i < N; i++)\
n
count[a[i].charAt(d) + 1]++;\n\n
// compute cumulates\n
for (int r = 0; r < R; r++)\n
count[r+1] += count[r];\n\n
/
/ move data\n
for (int i = 0; i < N; i++)\n
aux[count[a[i].cha
rAt(d)]++] = a[i];\n\n
// copy back\n
for (int i = 0; i < N; i++)\
n
a[i] = aux[i];\n
}\n}"
],
"key" : 9,
"order" : "O(n)",
"shortTitle" : "Radix Sort",
"tileImage" : "images/sorting/RadixSort1.jpg",
"title" : "Radix Sort"
},

{ "backgroundImage" : "images/math/sieve.png",
"description" : "Sieve of Eratosthenes is used to find prime numbers up to s
ome predefined integer n. For sure, we can just test all the numbers in range fr
om 2 to n for primality using some approach, but it is quite inefficient. Sieve
of Eratosthenes is a simple algorithm to find prime numbers. Though better algorithms exist today, sieve of Eratosthenes is a great example of the si
eve approach.\nAlgorithm\n\nFirst of all algorithm requires a bit array isCompos
ite to store n - 1 numbers: isComposite[2 .. n]. Initially the array contains ze
ros in all cells. During algorithm's work, numbers, which can be represented as
k * p, where k >= 2, p is prime, are marked as composite numbers by writing 1 in
a corresponding cell. Algorithm consists of two nested loops: outer, seeking fo
r unmarked numbers (primes), and inner, marking primes' multiples as composites.
\n\n
For all numbers m: 2 .. n, if m is unmarked:\n
add m to primes li
st;\n
mark all its multiples, less than or equal to n (k * m <= n, k >=
2);\n
otherwise, if m is marked, then it is a composite number.\n\nModificati
on\n\nLet's notice, that algorithm can start marking multiples beginning from sq
uare of a found unmarked number. Indeed,\n\nFor m = 2, algorithm marks\n\n
2 * 2, 3 * 2, 4 * 2, 5 * 2, 6 * 2, 7 * 2, ...\n\nFor m = 3,\n\n
2 * 3\n\n
were already marked on m = 2 step.\n\nFor m = 5,\n\n
2 * 5, 3 * 5, 4 * 5 (=
10 * 2)\n\nwere already marked on m = 2 and m = 3 steps.\n\nStrong proof is omi
tted, but you can easily prove it on your own. Modified algorithm is as followin
g:\n\n
For all numbers m: 2 .. sqrt(n), if m is unmarked:\n
add m to p

rimes list;\n
mark all its multiples, starting from square, less than or equal to n (k * m <= n, k >= m);\n
otherwise, if m is marked, then it is a co
mposite number;\n
check all numbers in range sqrt(n) .. n. All found unmarked
numbers are primes, add them to list.\n\nExample\n\nApply sieve of Eratosthenes
to find all primes in range 2..100.\nNB: Shown in The Algorithm Image",
"group" : { "backgroundImage" : "images/math/math_1.gif",
"description" : "Set of Mathematical based Algorithms",
"groupImage" : "images/math/math_header.png",
"key" : "Mathematics",
"shortTitle" : "Math",
"title" : "Mathematics"
},
"algorithms" : [ "// Normal Code C#\nbool[] notPrime = new bool[total];\nnot
Prime[0] = true;\nnotPrime[1] = true;\nfor (int i = 2; i <= Math.Sqrt(notPrime.L
ength); i++) {\n if (!notPrime[i]) {\n
for (int j = i * 2; j < notPrime.Leng
th; j += i) {\n
notPrime[j] = true;\n
}\n }\n}\n\n// Using Linq C#\nint
cur = 1, total = 1000;\nvar pc = Enumerable.Range(2, total).ToList();\n\nwhile(
cur <= Math.Sqrt(total))\n{\n
cur = pc.First(i => i > cur);\n
pc.RemoveAll
(i => i != cur && i % cur == 0);\n}",
"#include <iostream>\n#include <bitset>\n#define SIZE 10000000\n
#define MAX (int)(SIZE-3)/2\nusing namespace std;\n \nint primes[MAX + 1];\nbits
et<MAX + 1> bset;\n \nvoid setPrimes()\n{\n
for(int i = 0; i * i <= SIZE; i++
)\n
if(!bset.test(i))\n
for(int j = i + 1;(2 * j + 1) * ( 2 *
i + 3) <= SIZE;j++)\n
bset.set(((2 * j + 1) * (2 * i + 3) - 3)
/2);\n
primes[0]=2;\n
for(int i = 1, j = 0; j < MAX + 1; j++)\n
if(!
bset.test(j))\n
primes[i++] = 2 * j + 3; \n}",
"public class PrimeSieve {\n
public static void main(String[]
args) { \n
int N = Integer.parseInt(args[0]);\n\n
// initially as
sume all integers are prime\n
boolean[] isPrime = new boolean[N + 1];\n
for (int i = 2; i <= N; i++) {\n
isPrime[i] = true;\n
}\
n\n
// mark non-primes <= N using Sieve of Eratosthenes\n
for (int
i = 2; i*i <= N; i++) {\n\n
// if i is prime, then mark multiples of
i as nonprime\n
// suffices to consider multiples i, i+1, ..., N/i\n
if (isPrime[i]) {\n
for (int j = i; i*j <= N; j++) {\n
isPrime[i*j] = false;\n
}\n
}\n
}\n\n
// count primes\n
int primes = 0;\n
for (int i =
2; i <= N; i++) {\n
if (isPrime[i]) primes++;\n
}\n
Sy
stem.out.println(\"The number of primes <= \" + N + \" is \" + primes);\n
}\n
}"
],
"key" : 101,
"order" : "O(n(logn)(log log n))",
"shortTitle" : "Sieve of Eratosthenes",
"tileImage" : "images/math/sieve.png",
"title" : "Sieve of Eratosthenes"
},
{ "backgroundImage" : "images/math/pasc2.gif",
"description" : "Pascal's Triangle: One of the most interesting Number Patte
rns is Pascal's Triangle (named after Blaise Pascal, a famous French Mathematici
an and Philosopher).\n\nTo build the triangle, start with \"1\" at the top, then
continue placing numbers below it in a triangular pattern.\n\nEach number is ju
st the two numbers above it added together (except for the edges, which are all
\"1\").\n\nPatterns Within the Triangle:\nDiagonals:\n
- The first diagonal i
s, of course, just \"1\"s, and the next diagonal has the Counting Numbers (1,2,3
, etc).\n
- The third diagonal has the triangular numbers\n
- (The fourth
diagonal, not highlighted, has the tetrahedral numbers.)\n\nHorizontal Sums:\n
- What do you notice about the horizontal sums?\n
- Is there a pattern? Isn
't it amazing! It doubles each time (powers of 2).\n\nExponents of 11:\nEach lin
e is also the powers (exponents) of 11:\n
11 ^ 0 = 1 (the first line is just
a \"1\")\n
11 ^ 1 = 11 (the second line is \"1\" and \"1\")\n
11 ^ 2 = 121

(the third line is \"1\", \"2\", \"1\")\n


etc!\nBut what happens with 11 ^ 5
? Simple! The digits just overlap [1 5 10 10 5 1] = [1 (5+1) (0+1) 0 5 1] = 161
051 = 11 ^ 5\n\nFibonacci Sequence:\nTry this: make a pattern by going up and th
en along, then add up the values (as illustrated) ... you will get the Fibonacci
Sequence.\n(The Fibonacci Sequence starts \"1, 1\" and then continues by adding
the two previous numbers, for example 3+5=8, then 5+8=13, etc)\n\nNB: Check the
algorithm image and try to make sense with the colored pixels\n\nSymmetrical:\n
And the triangle is also symmetrical. The numbers on the left side have identica
l matching numbers on the right side, like a mirror image.",
"group" : { "backgroundImage" : "images/math/math_1.gif",
"description" : "Set of Mathematical based Algorithms",
"groupImage" : "images/math/math_header.png",
"key" : "Mathematics",
"shortTitle" : "Math",
"title" : "Mathematics"
},
"algorithms" : [ "using System;\nusing System.Collections.Generic;\nusing Sy
stem.Text;\n\nnamespace PascalTriangle\n{\n class PascalTriangle\n {\n
static void Main(string[] args)\n
{\n
System.Console.WriteLi
ne(\"Pascal Triangle Program\");\n
System.Console.Write(\"Enter the n
umber of rows: \");\n
string input = System.Console.ReadLine();\n
int n = Convert.ToInt32(input);\n
for (int y = 0; y < n; y++)
\n
{\n
int c = 1;\n
for (int q = 0; q <
n - y; q++)\n
{\n
System.Console.Write(\" \"
);\n
}\n
for (int x = 0; x <= y; x++)\n
{\n
System.Console.Write(\" {0:D} \", c);\n
c = c * (y - x) / (x + 1);\n
}\n
System.Con
sole.WriteLine();\n
System.Console.WriteLine();\n
}\n
System.Console.WriteLine();\n
}\n
}\n}",
"#include <iostream>\n\nint main()\n{\n
int n, x, y, c, q\n
cout << \"Enter Number of Rows:\";\n
cin >> n;\n
for (y = 0; y < n; y++)
{\n
c = 1;\n
for(q = 0; q < n - y; q++) {\n
printf(\"%
3s\", \" \");\n
}\n
for (x = 0; x <= y; x++) {\n
prin
tf(\" %3d \",c);\n
c = c * (y - x) / (x + 1);\n
}\n
cout << endl;\n
}\n
cout << endl;\n
return 0;\n}",
"public static long[][] pascal(final int n) {\n
long[][] a =
new long[n][];\n
for(int i = 0; i < n; i++)\n
a[i] = new long[i + 2];\
n
a[0][0] = 1;\n
for(int i = 1; i < a.length; i++) {\n
a[i][0] = 1;
\n
for(int j = 1; j < a[i].length - 1; j++) {\n
a[i][j] = a[i
- 1][j] + a[i - 1][j - 1];\n
}\n
}\n
return a;\n}"
],
"key" : 102,
"order" : "O(n ^ 2)",
"shortTitle" : "Pascal Triangle",
"tileImage" : "images/math/pasc2.gif",
"title" : "Pascal Triangle"
},
{ "backgroundImage" : "images/math/primefac.gif",
"description" : "Prime Numbers:\nA Prime Number can be divided evenly only b
y 1 or itself. And it must be a whole number greater than 1.\n\nFactors:\n\"Fact
ors\" are the numbers you multiply together to get another number:\n
2 * 3 = 6 [2, 3 are factors]\n
\nPrime Factorization:
\n\"Prime Factorization\" is finding which prime numbers multiply together to ma
ke the original number.\n\nExample: What are the prime factors of 12 ?\nIt is be
st to start working from the smallest prime number, which is 2, so let's check:\
n
12 / 2 = 6\nYes, it divided evenly by 2. We have taken the first step!\nB
ut 6 is not a prime number, so we need to go further. Let's try 2 again:\n
6 / 2 = 3\nYes, that worked also. And 3 is a prime number, so we have the answer
:\n
12 = 2 * 2 * 3\nAs you can see, every factor is a prime number, so the answer must be right.\nNote: 12 = 2 * 2 * 3 can also be written using exponents as 12 = 2^2 * 3\n\nWhy?\nA prime number can only be divided by 1 or itself, so it cannot


be factored any further!\nEvery other whole number can be broken down into prime
number factors. It is like the Prime Numbers are the basic building blocks of a
ll numbers\n\nAnd here is another thing:\nThere is only one (unique!) set of pri
me factors for any number.\nExample The prime factors of 330 are 2, 3, 5 and 11:
\n
[ 330 = 2 * 3 * 5 * 11 ]\nThere is no other possible set of prime numbers th
at can be multiplied to make 330.",
"group" : { "backgroundImage" : "images/math/math_1.gif",
"description" : "Set of Mathematical based Algorithms",
"groupImage" : "images/math/math_header.png",
"key" : "Mathematics",
"shortTitle" : "Math",
"title" : "Mathematics"
},
"algorithms" : [ "using System.Collections;\n\npublic static int[] PrimeFact
ors(int num)\n{ \n
ArrayList factors = new ArrayList();\n
bool alreadyCou
nted = false; \n
while (num % 2 == 0) \n
{ \n
if (alreadyCounted
== false) \n
{ \n
factors.Add(2); \n
alreadyCou
nted = true; \n
} \n
num = num / 2; \n
}\n
\n
int di
visor = 3;\n
alreadyCounted = false; \n
while (divisor <= num) \n
{
\n
if (num % divisor == 0) \n
{ \n
if (alreadyCounted
== false) \n
{ \n
factors.Add(divisor); \n
alreadyCounted = true; \n
} \n
num = num / divis
or; \n
} \n
else \n
{ \n
alreadyCounted = fa
lse; \n
divisor += 2; \n
}\n
} \n
int[] returnFactors
= (int[])factors.ToArray(typeof(int));\n
return returnFactors; \n}",
"#include<stdio.h>\n#include<math.h>\n\nprime_factorization(long
x)\n{\n
long i;\n
long c;\n
c = x;\n
while ((c % 2) == 0) {\n
printf(\"%ld\\n\",2);\n
c = c / 2;\n
}\n
i = 3;\n
while (i <=
(sqrt(c)+1)) {\n
if ((c % i) == 0) {\n
printf(\"%ld\\n\",i);\n
c = c / i;\n
}\n
else\n
i = i + 2;\n
}\n
if (c > 1) printf(\"%ld\\n\",c);\n}",
"public static List<Integer> primeFactors(int numbers) {\n
in
t n = numbers;\n
List<Integer> factors = new ArrayList<Integer>();\n
for (
int i = 2; i <= n / i; i++) {\n
while (n % i == 0) {\n
factors.add(i
);\n
n /= i;\n
}\n
}\n
if (n > 1) {\n
factors.add(n);\n
}\n
return factors;\n}"
],
"key" : 103,
"order" : "O(root(n))",
"shortTitle" : "Prime Factors",
"tileImage" : "images/math/primefac.gif",
"title" : "Prime Factors"
},
{ "backgroundImage" : "images/math/modexp.gif",
"description" : "Modular multiplication:\nGiven two numbers, a and b, their
product modulo n is a * b mod n. Consider the number x < n, such that x = a mod
n. Such a number always exists, and we usually call it the remainder of dividing
a by n. Similarly, there is a y < n, such that y = b mod n. It follows from ba
sic rules of modular arithmetic that x * y = a * b mod n. Therefore, if we wan
t to know the product of a and b modulo n, we just have to keep their remainders
when divided by n. Note: a and b may be arbitrarily large, but x and y are alwa
ys smaller than n.\n\nUsing the properties of modular multiplication:\nAs weve le
arned above, modular multiplication allows us to just keep the intermediate resu
lt mod n at each step. So we dont have to ever hold numbers larger than the modul
o. Heres the implementation of a simple repeated multiplication algorithm for com
puting modular exponents this way:\nNB: python code:\n\n
pow(a, b, n)\n
r
= 1\n
for i in xrange(b):\n
r = r * a % n\n
return r\n
\nWe use
exactly the same algorithm, but reduce every multiplication mod n. So the number
s we deal with here are never very large.",

"group" : { "backgroundImage" : "images/math/math_1.gif",


"description" : "Set of Mathematical based Algorithms",
"groupImage" : "images/math/math_header.png",
"key" : "Mathematics",
"shortTitle" : "Math",
"title" : "Mathematics"
},
"algorithms" : [ "Bignum modpow(Bignum base, Bignum exponent, Bignum modulus
) {\n
Bignum result = 1;\n
while (exponent > 0) {\n
if ((exponent &
1) == 1) {\n
result = (result * base) % modulus;\n
}\n
exponent >>= 1;\n
base = (base * base) % modulus;\n
}\n
return res
ult;\n}",
"int modExp(int b, int e, int m) {\n
int remainder;\n
int
x = 1;\n
while (e != 0) {\n
remainder = e % 2;\n
e = e/2;\n
if (remainder == 1)\n
x = (x * b) % m;\n
b = (b * b) % m;
// New base equal b^2 % m\n
}\n
return x;\n}",
"public static double pow(double x, int y) {\n
if(y == 0)\n
return 1.0;\n
double tmp = pow(x, y / 2);\n
if(y % 2 == 0)\n
return tmp * tmp;\n
else {\n
return tmp * tmp * (y > 0? x : 1/x);\n
}\n}"
],
"key" : 104,
"order" : "O(k M(n))",
"shortTitle" : "Modular Exponentiation",
"tileImage" : "images/math/modexp.gif",
"title" : "Modular Exponentiation"
},

{ "backgroundImage" : "images/tree/traversal1.png",
"description" : "For Binary Trees:\nThere are three types of depth-first tra
versal: pre-order, in-order and post-order. For a binary tree, they are defined
as operations recursively at each node, starting with the root node follows:\n\n
Pre-order:\n
1. Visit the root.\n
2. Traverse the left subtree.\n
3. Tr
averse the right subtree.\n\nIn-order (symmetric):\n
1. Traverse the left sub
tree.\n
2. Visit the root.\n
3. Traverse the right subtree.\n\nPost-order:
\n
1. Traverse the left subtree.\n
2. Traverse the right subtree.\n
3.
Visit the root.\n\nThe trace of a traversal is called a sequentialization of the
tree. No one sequentialisation according to pre-, in- or post-order describes t
he underlying tree uniquely, but any combination of two types of sequentialisati
on does.\n\nGeneric tree:\nTo traverse any tree in depth-first order, perform th
e following operations recursively at each node:\n\n
Perform pre-order operat
ion\n
for i=1 to n-1 do\n
Visit child[i], if present\n
Perform
in-order operation\n
Visit child[n], if present\n
Perform post-order opera
tion\n\nwhere n is the number of child nodes. Depending on the problem at hand,
the pre-order, in-order or post-order operations may be void, or you may only wa
nt to visit a specific child node, so these operations are optional. Also, in pr
actice more than one of pre-order, in-order and post-order operations may be req
uired. For example, when inserting into a ternary tree, a pre-order operation is
performed by comparing items. A post-order operation may be needed afterwards t
o re-balance the tree.",
"group" : { "backgroundImage" : "images/tree/traversal1.png",
"description" : "Set of Basic Tree Algorithms and Special Trees",
"groupImage" : "images/tree/tree_header.png",
"key" : "Trees",
"shortTitle" : "Trees",
"title" : "Trees"

},
"algorithms" : [ "public void Preorder(Node Root)\n{\n
if (Root != null)\
n
{\n
Console.Write(Root.item + \" \");\n
Preorder(Root.leftChi
ld);\n
Preorder(Root.rightChild);\n
}\n}\npublic void Inorder(Node Roo
t)\n{\n
if (Root != null)\n
{\n
Inorder(Root.leftChild);\n
C
onsole.Write(Root.item + \" \");\n
Inorder(Root.rightChild);\n
}\n}\np
ublic void Postorder(Node Root)\n{\n
if (Root != null)\n
{\n
Postor
der(Root.leftChild);\n
Postorder(Root.rightChild);\n
Console.Write
(Root.item + \" \");\n
}\n}",
"/ Print the tree in-order\n// Traverse the left sub-tree, root,
right sub-tree\nvoid Tree::inOrder(Node* n) {\n
if ( n ) {\n
inOrder(n
->Left());\n
cout << n->Key() << \" \";\n
inOrder(n->Right());\n
}\n}\n\n// Print the tree pre-order\n// Traverse the root, left sub-tree, right
sub-tree\nvoid Tree::preOrder(Node* n) {\n
if ( n ) {\n
cout << n->Key(
) << \" \";\n
preOrder(n->Left());\n
preOrder(n->Right());\n
}\n}
\n\n// Print the tree post-order\n// Traverse left sub-tree, right sub-tree, roo
t\nvoid Tree::postOrder(Node* n) {\n
if ( n ) {\n
postOrder(n->Left());
\n
postOrder(n->Right());\n
cout << n->Key() << \" \";\n
}\n}",
"/** Traverses the tree in a pre-order approach, starting from t
he specified node. */\npublic void preorder( Node node ) {\n
if( node != null
) {\n
System.out.print( node.data + \" \" );\n
preorder( node.lef
t );\n
preorder( node.right );
\n
}\n}\n \n/** Traverses th
e tree in a in-order approach, starting from the specified node. */\nprivate voi
d inorder( Node node ) {\n
if( node != null ) {\n
inorder( node.left )
;\n
System.out.print( node.data + \" \" );\n
inorder( node.right )
;\n
}\n}\n \n/** Traverses the tree in a post-order approach, starting from t
he specified node. */
\npublic void postorder( Node node ) {\n
if( node !
= null ) {\n
postorder( node.left );\n
postorder( node.right );\n
System.out.print( node.data + \" \" );\n
}\n}"
],
"key" : 201,
"order" : "O(log n)",
"shortTitle" : "Depth First Search",
"tileImage" : "images/tree/traversal1.png",
"title" : "Depth First Search"
},
{ "backgroundImage" : "images/tree/bst.png",
"description" : "First of all, binary search tree (BST) is a dynamic data st
ructure, which means, that its size is only limited by amount of free memory in
the operating system and number of elements may vary during the program run. Mai
n advantage of binary search trees is rapid search, while addition is quite chea
p. Let us see more formal definition of BST.\n\nBinary search tree is a data str
ucture, which meets the following requirements:\n\n
1. it is a binary tree;\n
2. each node contains a value;\n
3. a total order is defined on these val
ues (every two values can be compared with each other);\n
4. left subtree of a node contains only values lesser, than the nodes value;\n      5. right subtree of a node contains only values greater, than the nodes value.\n\nNotice, that
definition above doesnt allow duplicates.\n\nWhat for binary search trees are u
sed?\n\nBinary search tree is used to construct map data structure. In practice,
data can be often associated with some unique key. For instance, in the phone b
ook such a key is a telephone number. Storing such a data in binary search tree
allows to look up for the record by key faster, than if it was stored in unorder
ed list. Also, BST can be utilized to construct set data structure, which allows
to store an unordered collection of unique values and make operations with such
collections.\n\nPerformance of a binary search tree depends of its height. In o
rder to keep tree balanced and minimize its height, the idea of binary search tr
ees was advanced in balanced search trees (AVL trees, Red-Black trees, Splay tre
es). Here we will discuss the basic ideas, laying in the foundation of binary se
arch trees.\n\nSearching Algorithm:\nSearching a binary search tree for a specif
ic key can be a recursive or iterative process.\n\nWe begin by examining the roo

t node. If the tree is null, the key we are searching for does not exist in the
tree. Otherwise, if the key equals that of the root, the search is successful. I
f the key is less than the root, search the left subtree. Similarly, if it is gr
eater than the root, search the right subtree. This process is repeated until th
e key is found or the remaining subtree is null. If the searched key is not foun
d before a null subtree is reached, then the item must not be present in the tre
e.",
"group" : { "backgroundImage" : "images/tree/traversal1.png",
"description" : "Set of Basic Tree Algorithms and Special Trees",
"groupImage" : "images/tree/tree_header.png",
"key" : "Trees",
"shortTitle" : "Trees",
"title" : "Trees"
},
"algorithms" : [ "private TreeNode<T> Find(T x, TreeNode<T> t)\n{\n
while
(t != null)\n
{\n
if ((x as IComparable).CompareTo(t.Element) < 0)\n
{\n
t = t.Left;\n
}\n
else if ((x as IComparable
).CompareTo(t.Element) > 0)\n
{\n
t = t.Right;\n
}\n
else\n
{\n
return t;\n
}\n
}\n \n
return nul
l;\n}\n \nprivate TreeNode<T> FindMin(TreeNode<T> t)\n{\n
if (t != null)\n
{\n
while (t.Left != null)\n
{\n
t = t.Left;\n
}\n
}\n \n
return t;\n}\n \nprivate TreeNode<T> FindMax(TreeNode<T> t)\n{\
n
if (t != null)\n
{\n
while (t.Right != null)\n
{\n
t = t.Right;\n
}\n
}\n \n
return t;\n}",
"template <class T>\nBSTNode<T>** BinarySearchTree<T>::search(T
data) {\n
BSTNode<T>** node = &root;\n
while (*node != NULL) {\n
if (data < (*node)->data)\n
node = &(*node)->left;\n
else if (
(*node)->data < data)\n
node = &(*node)->right;\n
else\n
break;\n
}\n
return node;\n}",
"public E bin_search(E key) {\n
if (root == null)\n
re
turn null;\n
Node<E> node = root;\n
int compareResult;\n
while ((compar
eResult = node.value.compareTo(key)) != 0) {\n
if (compareResult > 0) {\n
if (node.left != null)\n
node = node.left;\n
else\n
return null;\n
} else {\n
if (node.ri
ght != null)\n
node = node.right;\n
else\n
return null;\n
}\n
}\n
return node.value;\n}"
],
"key" : 202,
"order" : "O(log n)",
"shortTitle" : "Searching in BST",
"tileImage" : "images/tree/bst.png",
"title" : "Searching in Binary Search Tree"
},
{ "backgroundImage" : "images/tree/lca1.png",
"description" : "The lowest common ancestor (LCA) is a concept in graph theo
ry and computer science. Let T be a rooted tree with n nodes. The lowest common
ancestor is defined between two nodes v and w as the lowest node in T that has b
oth v and w as descendants (where we allow a node to be a descendant of itself).
\n\nThe LCA of v and w in T is the shared ancestor of v and w that is located fa
rthest from the root. Computation of lowest common ancestors may be useful, for
instance, as part of a procedure for determining the distance between pairs of n
odes in a tree: the distance from v to w can be computed as the distance from th
e root to v, plus the distance from the root to w, minus twice the distance from
the root to their lowest common ancestor.\n\nIn a tree data structure where eac
h node points to its parent, the lowest common ancestor can be easily determined
by finding the first intersection of the paths from v and w to the root. In gen
eral, the computational time required for this algorithm is O(h) where h is the
height of the tree (length of longest path from a leaf to the root). However, th
ere exist several algorithms for processing trees so that lowest common ancestor
s may be found more quickly, in constant time per query after a linear time prep

rocessing stage.\n\nAn easy solution:\nAs we trace the two paths from both nodes
up to the root, eventually it will merge into one single path. The LCA is the e
xact first intersection node where both paths merged into a single path. An easy
solution is to use a hash table which records visited nodes as we trace both pa
ths up to the root. Once we reached the first node which is already marked as vi
sited, we immediately return that node as the LCA.\n\nThe best solution:\nA litt
le creativity is needed here. Since we have the parent pointer, we could easily
get the distance (height) of both nodes from the root. Once we knew both heights
, we could subtract from one another and get the heights difference (dh). If you
observe carefully from the previous solution, the node which is closer to the ro
ot is always dh steps ahead of the deeper node. We could eliminate the need of m
arking visited nodes altogether. Why?\n\nThe reason is simple, if we advance the
deeper node dh steps above, both nodes would be at the same depth. Then, we adv
ance both nodes one level at a time. They would then eventually intersect at one
node, which is the LCA of both nodes. If not, one of the node would eventually
reach NULL (roots parent), which we conclude that both nodes are not in the same
tree. However, that part of code shouldnt be reached, since the problem statement
assumed that both nodes are in the same tree.",
"group" : { "backgroundImage" : "images/tree/traversal1.png",
"description" : "Set of Basic Tree Algorithms and Special Trees",
"groupImage" : "images/tree/tree_header.png",
"key" : "Trees",
"shortTitle" : "Trees",
"title" : "Trees"
},
"algorithms" : [ "public interface Node\n{\n
Node Parent { get; }\n}\n\np
ublic static Node GetLeastCommonAncestor(Node a, Node b)\n{\n
Stack<Node> aSt
ack = GetStack(a);\n
Stack<Node> bStack = GetStack(b);\n\n
Node zipper = null;\n
while (aStack.Count > 0 && bStack.Count > 0 && aStack.Pop() == bStack.Peek())
\n
zipper = bStack.Pop();\n\n
return zipper;\n}\n\nprivate static Stac
k<Node> GetStack(Node me)\n{\n
Stack<Node> stack = new Stack<Node>();\n\n
while (me != null)\n
{\n
stack.Push(me);\n
me = me.Parent;\n
}\n\n
return stack;\n}",
"int getHeight(Node *p) {\n int height = 0;\n while (p) {\n
height++;\n
p = p->parent;\n }\n return height;\n}\n \n// As root->parent
is NULL, we dont need to pass root in.\nNode *LCA(Node *p, Node *q) {\n int h1
= getHeight(p);\n int h2 = getHeight(q);\n // swap both nodes in case p is de
eper than q.\n if (h1 > h2) {\n
swap(h1, h2);\n
swap(p, q);\n }\n // in
variant: h1 <= h2.\n int dh = h2 - h1;\n for (int h = 0; h < dh; h++)\n
q =
q->parent;\n while (p && q) {\n
if (p == q) return p;\n
p = p->parent;\n
q = q->parent;\n }\n return NULL; // p and q are not in the same tree\n}"
,
"public static Node findLowestCommonAncestor( Node startNode, in
t a, int b ) {\n
// Find the lowest (nearest) common ancestor\n
while( sta
rtNode != null ) {\n
if( a < startNode.data && b < startNode.data ) {\n
startNode = startNode.left;\n
} else if( a > startNode.data &&
b > startNode.data ) { \n
startNode = startNode.right;\n
} els
e {\n
return startNode;\n
}\n
}\n
return null;\n}"
],
"key" : 203,
"order" : "O(dh)",
"shortTitle" : "Lowest Common Ancestor",
"tileImage" : "images/tree/lca1.png",
"title" : "Lowest Common Ancestor"
},

{ "backgroundImage" : "images/graph/dfs.png",
"description" : "Depth-first search, or DFS, is a way to traverse the graph.
Initially it allows visiting vertices of the graph only, but there are hundreds
of algorithms for graphs, which are based on DFS. Therefore, understanding the
principles of depth-first search is quite important to move ahead into the graph
theory. The principle of the algorithm is quite simple: to go forward (in depth
) while there is such possibility, otherwise to backtrack.\n\nAlgorithm:\nIn DFS
, each vertex has three possible colors representing its state:\n\n
1. white:
vertex is unvisited;\n
2. gray: vertex is in progress;\n
3. black: DFS ha
s finished processing the vertex.\n\nNB: For most algorithms boolean classificat
ion unvisited / visited is quite enough, but we show general case here.\n\nIniti
ally all vertices are white (unvisited). DFS starts in arbitrary vertex and runs
as follows:\n\n
Mark vertex u as gray (visited).\n
For each edge (u, v), where v is white, run depth-first search for v recursively.\n
Mark vertex u a
s black and backtrack to the parent.\n\nIf a graph is disconnected, DFS won't visit all of its vertices. For details, see finding connected components algorithm.\n\nComplexity analysis:\nAssume that graph is connected. Depth-first search visits every vertex in the graph and checks every edge exactly once. Therefore, DFS complexity is O(V + E). As it was mentioned before, if an adjacency matrix is used for a graph representation, then all edges adjacent to a vertex can't be found efficiently, which results in O(V2) complexity",
"group" : { "backgroundImage" : "images/graph/group_graph.png",
"description" : "Set of Basic Graph Algorithms and Some Advanced Applica
tions on Graphs. Assuming Adjacency List Representations, so All the orders are
calculated",
"groupImage" : "images/graph/graphs_header.png",
"key" : "Graphs",
"shortTitle" : "Graphs",
"title" : "Graphs"
},
"algorithms" : [ "Visit(IVertexListGraph g, IVertex u)\n{\n
Colors[u] = G
ray;\n
OnDiscoverVertex(u); // event\n
// examine edges\n
foreach(IEdge
e in g.OutEdges(u))\n
{\n
OnExamineEdge(e); // event\n
        if (Colors[e.Target] == White)\n
{\n
OnTreeEdge(e); // event\n
            Visit(g, e.Target);\n
}\n
        else if (Colors[e.Target] == Gray)\n
{\n
OnBackEdge(e); // event\n
}\n
else\n
OnForwardOr
CrossEdge(e); // event\n
}\n
Colors[u] = Black;\n
OnFinishVertex(u); //
event\n}",
"enum VertexState { White, Gray, Black };\n\nvoid Graph::DFS() {
\n
VertexState *state = new VertexState[vertexCount];\n
for (int i = 0
; i < vertexCount; i++)\n
state[i] = White;\n
runDFS(0, state);\
n
delete [] state;\n}\n\nvoid Graph::runDFS(int u, VertexState state[]) {\n
state[u] = Gray;\n
for (int v = 0; v < vertexCount; v++)\n
if (isEdge(u, v) && state[v] == White)\n
runDFS(v, state);\n
state[u] = Black;\n}",
"public class Graph {\n
enum VertexState { White, Gray, Black
}\n
public void DFS() {\n
VertexState state[] = new VertexState[verte
xCount];\n
for (int i = 0; i < vertexCount; i++)\n
state[i] =

VertexState.White;\n
runDFS(0, state);\n
}\n\n
public void runDFS(i
nt u, VertexState[] state) {\n
state[u] = VertexState.Gray;\n
for
(int v = 0; v < vertexCount; v++)\n
if (isEdge(u, v) && state[v] == V
ertexState.White)\n
runDFS(v, state);\n
state[u] = VertexS
tate.Black;\n
}\n}"
],
"key" : 301,
"order" : "O(|V| + |E|)",
"shortTitle" : "Depth First Search",
"tileImage" : "images/graph/dfs.png",
"title" : "Depth First Search"
},
{ "backgroundImage" : "images/graph/bfs.png",
"description" : "In graph theory, breadth-first search (BFS) is a graph sear
ch algorithm that begins at the root node and explores all the neighboring nodes
. Then for each of those nearest nodes, it explores their unexplored neighbor no
des, and so on, until it finds the goal.\n\nHow it works:\nBFS is an uninformed
search method that aims to expand and examine all nodes of a graph or combinatio
n of sequences by systematically searching through every solution. In other word
s, it exhaustively searches the entire graph or sequence without considering the
goal until it finds it. It does not use a heuristic algorithm.\n\nFrom the stan
dpoint of the algorithm, all child nodes obtained by expanding a node are added
to a FIFO (i.e., First In, First Out) queue. In typical implementations, nodes t
hat have not yet been examined for their neighbors are placed in some container
(such as a queue or linked list) called \"open\" and then once examined are plac
ed in the container \"closed\".\n\nAlgorithm (informal):\n
1. If the element
sought is found in this node, quit the search and return a result.\n
2. Other
wise enqueue any successors (the direct child nodes) that have not yet been disc
overed.\n\nNote: Using a stack instead of a queue would turn this algorithm into
a depth-first search.\n\nThe algorithm uses a queue data structure to store int
ermediate results as it traverses the graph, as follows:\n
1. Enqueue the roo
t node\n
2. Dequeue a node and examine it\n
2.a. If the element sought
is found in this node, quit the search and return a result.\n
2.b. Other
wise enqueue any successors (the direct child nodes) that have not yet been disc
overed.\n
3. If the queue is empty, every node on the graph has been examined - quit the search and return \"not found\".\n
4. If the queue is not empty, re
peat from Step 2.\n
\nSpace complexity:\nWhen the number of vertices in the g
raph is known ahead of time, and additional data structures are used to determin
e which vertices have already been added to the queue, the space complexity can
be expressed as O(|V|) where |V| is the cardinality of the set of vertices. If t
he graph is represented by an Adjacency list it occupies O(|V|+|E|) space in mem
ory, while an Adjacency matrix representation occupies O(V^2)\n\nTime complexity
:\nThe time complexity can be expressed as O(|V|+|E|) [3] since every vertex and
every edge will be explored in the worst case. Note: O(|E|) may vary between O(
|V|) and O(|V|^2), depending on how sparse the input graph is (assuming that the
graph is connected).",
"group" : { "backgroundImage" : "images/graph/group_graph.png",
"description" : "Set of Basic Graph Algorithms and Some Advanced Applica
tions on Graphs. Assuming Adjacency List Representations, so All the orders are
calculated",
"groupImage" : "images/graph/graphs_header.png",
"key" : "Graphs",
"shortTitle" : "Graphs",
"title" : "Graphs"
},
"algorithms" : [ "NOT FOUND",
"NOT FOUND",
"public void bfs() {\n
// BFS uses Queue data structure\n
Queue queue = new LinkedList();\n
queue.add(this.rootNode);\n
printNode(th
is.rootNode);\n
rootNode.visited = true;\n
while (!queue.isEmpty()) {\n

Node node = (Node) queue.remove();\n


Node child = null;\n
whi
le ((child = getUnvisitedChildNode(node)) != null) {\n
child.visited
= true;\n
printNode(child);\n
queue.add(child);\n
}
\n
}\n
// Clear visited property of nodes\n
clearNodes();\n}"
],
"key" : 302,
"order" : "O(|V| + |E|)",
"shortTitle" : "Breadth First Search",
"tileImage" : "images/graph/bfs.png",
"title" : "Breadth First Search"
},
{ "backgroundImage" : "images/graph/topo.jpg",
"description" : "Topological sort: an ordering of the vertices in a directed
acyclic graph, such that: If there is a path from u to v, then v appears after
u in the ordering.\n\nTypes of graphs:\n
1. The graphs should be directed: ot
herwise for any edge (u,v) there would be a path from u to v and also from v to
u, and hence they cannot be ordered.\n\n
2. The graphs should be acyclic: oth
erwise for any two vertices u and v on a cycle u would precede v and v would pre
cede u.\n
\nDegree of a vertex U: the number of edges (U,V) - outgoing edges\
nIndegree of a vertex U: the number of edges (V,U) - incoming edges\n\nThe algor
ithm for topological sort uses \"indegrees\" of vertices.\n\nAlgorithm:\n
1.
Compute the indegrees of all vertices\n
2. Find a vertex U with indegree 0 an
d print it (store it in the ordering)\n
If there is no such vertex then t
here is a cycle and the vertices cannot be ordered. Stop.\n
3. Remove U and a
ll its edges (U,V) from the graph.\n
4. Update the indegrees of the remaining
vertices.\n
5. Repeat steps 2 through 4 while there are vertices to be proce
ssed\n\nImproved Algorithm:\nAfter the initial scanning to find a vertex of degr
ee 0, we need to scan only those vertices whose updated indegrees have become eq
ual to zero.\n\n
1. Store all vertices with indegree 0 in a queue\n
2. get
a vertex U and place it in the sorted sequence (array or another queue).\n
3
. For all edges (U,V) update the indegree of V, and put V in the queue if the up
dated indegree is 0.\n
4. Perform steps 2 and 3 while the queue is not empty.
\n\nComplexity:\nThe number of operations is O(|E| + |V|), where |V| - number of
vertices, |E| - number of edges.\nHow many operations are needed to compute the
indegrees?\n\nDepends on the representation:\nAdjacency lists: O(|E|)\nMatrix:
O(|V|2)\n\nNote that if the graph is complete |E| = O(|V|2)",
"group" : { "backgroundImage" : "images/graph/group_graph.png",
"description" : "Set of Basic Graph Algorithms and Some Advanced Applica
tions on Graphs. Assuming Adjacency List Representations, so All the orders are
calculated",
"groupImage" : "images/graph/graphs_header.png",
"key" : "Graphs",
"shortTitle" : "Graphs",
"title" : "Graphs"
},
"algorithms" : [ "public bool Sort(out Queue<T> sortedQueue)\n{\n
sortedQ
ueue = new Queue<T>(); // create, even if it stays empty\n
var outputQueue =
new Queue<T>(); // with predecessorCount == 0\n
foreach( KeyValuePair<T, Node
Info> kvp in Nodes ) \n
if( kvp.Value.PredecessorCount == 0 )\n
outputQueue.Enqueue(kvp.Key) ;\n
T nodeKey;\n
NodeInfo nodeInfo;\n
while (outputQueue.Count != 0)\n
{\n
nodeKey = outputQueue.Dequeue()
;\n
sortedQueue.Enqueue(nodeKey); // add it to sortedQueue\n
nodeI
nfo = Nodes[nodeKey]; // get successors of nodeKey\n
Nodes.Remove(nodeKey
);
// remove it from Nodes\n
foreach (T successor in nodeInfo.Successo
rs)\n
if (--Nodes[successor].PredecessorCount == 0)\n
outputQueue.Enqueue(successor);\n
nodeInfo.Clear();\n
}\n
// output
Queue is empty here\n
if (Nodes.Count == 0)\n
return true;
// if th
ere are no nodes left in Nodes, return true\n
// there is at least one cycle\
n
CycleInfo(sortedQueue); // get one cycle in sortedQueue\n
return false;
// and fail\n}",

"#include <cstdio>\n#include <iostream>\n#include <algorithm>\n#


include <vector>\n#include <queue>\n#define MAX 101\nusing namespace std;\n\nint
INDEG[MAX], node, edge;\n\nvector<int> TOPOLOGICAL_SORT(int [][MAX]);\n\nint ma
in(void)\n{\n
int adjMatrix[MAX][MAX];\n
for(int i=0; i<MAX; i++)\n
for(int j=0; j<MAX; j++) {\n
adjMatrix[i][j] = 0;\n
INDEG
[i] = 0;\n
}\n
int M, N;\n
cin >> node >> edge;\n
for(int e=1; e
<=edge; e++) {\n
cin >> M >> N;\n
adjMatrix[M][N] = 1;\n
IN
DEG[N]++;\n
}\n
vector<int> orderedList = TOPOLOGICAL_SORT(adjMatrix);\n
for(int i=0; i<orderedList.size(); i++) {\n
cout << orderedList[i] << e
nds;\n
}\n
return 0;\n}\n\nvector<int> TOPOLOGICAL_SORT(int S[][MAX]) {\n
queue<int> Q;\n
for(int n=1; n<=node; n++) {\n
if(INDEG[n] == 0) Q.
push(n);\n
}\n
vector<int> topoList;\n
while( !Q.empty()) {\n
in
t N = Q.front(); Q.pop();\n
topoList.push_back(N);\n
for(int M=1;
M<=node; M++) {\n
if(S[N][M] == 1) {\n
INDEG[M]--;\n
if(INDEG[M] == 0) {\n
Q.push(M);\n
}\n
}\n
}\n
}\n
return topoList;\n}",
"public class Topological {\n
private Iterable<Integer> order
;
// topological order\n\n
// topological sort in a digraph\n
public To
pological(Digraph G) {\n
DirectedCycle finder = new DirectedCycle(G);\n
if (!finder.hasCycle()) {\n
DepthFirstOrder dfs = new DepthFirs
tOrder(G);\n
order = dfs.reversePost();\n
}\n
}\n
\n
// topological sort in an edge-weighted digraph\n
public Topological(EdgeWeig
htedDigraph G) {\n
EdgeWeightedDirectedCycle finder = new EdgeWeightedDir
ectedCycle(G);\n
if (!finder.hasCycle()) {\n
DepthFirstOrder d
fs = new DepthFirstOrder(G);\n
order = dfs.reversePost();\n
}\
n
}\n
\n
// return topological order if a DAG; null otherwise\n
publ
ic Iterable<Integer> order() {\n
return order;\n
}\n
\n
// does
digraph have a topological order?\n
public boolean hasOrder() {\n
retu
rn order != null;\n
}\n
\n}"
],
"key" : 303,
    "order" : "O(|V| + |E|)",
"shortTitle" : "Topological Sort",
"tileImage" : "images/graph/topo.jpg",
"title" : "Topological Sort"
},
{ "backgroundImage" : "images/graph/dijkstra.gif",
    "description" : "Dijkstra's algorithm solves the Single-Source Shortest-Path problem when all edges have non-negative weights. It is a greedy algorithm and similar to Prim's algorithm. Algorithm starts at the source vertex, s, it grows
a tree, T, that ultimately spans all vertices reachable from S. Vertices are add
ed to T in order of distance i.e., first S, then the vertex closest to S, then t
he next closest, and so on. Following implementation assumes that graph G is rep
resented by adjacency lists\n\nAlgorithm:\nLet the node at which we are starting
be called the initial node. Let the distance of node Y be the distance from the
initial node to Y. Dijkstra's algorithm will assign some initial distance value
s and will try to improve them step by step.\n\n
1. Assign to every node a te
ntative distance value: set it to zero for our initial node and to infinity for
all other nodes.\n
2. Mark all nodes unvisited. Set the initial node as curre
nt. Create a set of the unvisited nodes called the unvisited set consisting of a
ll the nodes except the initial node.\n
3. For the current node, consider all
of its unvisited neighbors and calculate their tentative distances. For example
, if the current node A is marked with a distance of 6, and the edge connecting
it with a neighbor B has length 2, then the distance to B (through A) will be 6+
2=8. If this distance is less than the previously recorded tentative distance of
B, then overwrite that distance. Even though a neighbor has been examined, it i
s not marked as \"visited\" at this time, and it remains in the unvisited set.\n
4. When we are done considering all of the neighbors of the current node, ma
rk the current node as visited and remove it from the unvisited set. A visited n
ode will never be checked again.\n
5. If the destination node has been marked

visited (when planning a route between two specific nodes) or if the smallest t
entative distance among the nodes in the unvisited set is infinity (when plannin
g a complete traversal), then stop. The algorithm has finished.\n
6. Select t
he unvisited node that is marked with the smallest tentative distance, and set i
t as the new \"current node\" then go back to step 3.\n
\nAnalysis:\n\nQ as a
linear array:\n
EXTRACT_MIN takes O(V) time and there are |V| such operation
s. Therefore, a total time for EXTRACT_MIN in while-loop is O(V2). Since the tot
al number of edges in all the adjacency list is |E|. Therefore for-loop iterates
|E| times with each iteration taking O(1) time. Hence, the running time of the
algorithm with array implementation is O(V2 + E) = O(V2).\n\nQ as a binary heap
(If G is sparse):\n
In this case, EXTRACT_MIN operations takes O(lg V) time a
nd there are |V| such operations.\n
The binary heap can be build in O(V) time
.\n
Operation DECREASE (in the RELAX) takes O(lg V) time and there are at mos
t such operations.\n
Hence, the running time of the algorithm with binary hea
p provided given graph is sparse is O((V + E) lg V). Note that this time becomes
O(ElgV) if all vertices in the graph is reachable from the source vertices.\n \
nQ as a Fibonacci heap:\n
In this case, the amortized cost of each of |V| EXT
RACT_MIN operations is O(lg V).\n
Operation DECREASE_KEY in the subroutine REL
AX now takes only O(1) amortized time for each of the |E| edges.\n
As we have
mentioned above that Dijkstra's algorithm does not work on the digraph with negative-weight edges. Now we give a simple example to show that Dijkstra's algorithm produces incorrect results in this situation. Consider the digraph consisting of V = {s, a, b} and E = {(s, a), (s, b), (b, a)} where w(s, a) = 1, w(s, b) = 2, and w(b, a) = -2.\n    Dijkstra's algorithm gives d[a] = 1, d[b] = 2. But due to the negative-edge weight w(b, a), the shortest distance from vertex s to vertex a is 1-2 = -1",
"group" : { "backgroundImage" : "images/graph/group_graph.png",
"description" : "Set of Basic Graph Algorithms and Some Advanced Applica
tions on Graphs. Assuming Adjacency List Representations, so All the orders are
calculated",
"groupImage" : "images/graph/graphs_header.png",
"key" : "Graphs",
"shortTitle" : "Graphs",
"title" : "Graphs"
},
"algorithms" : [ "public string dijkstra(string src, string dest)\n{\n
Pr
iorityQueue toVisit = new PriorityQueue();\n
List<string> visited = new List<
string>();\n
int[] dist = new int[vertices.Count];\n
string[] prev = new s
tring[vertices.Count];\n\n
for (int i = 0; i < dist.Length; i++)\n
{\n
dist[i] = Int32.MaxValue;\n
prev[i] = null;\n
}\n
dist[vertices
.IndexOf(src)] = 0;\n
toVisit.Enqueue(src, null, 0);\n\n
vertNode temp;\n
List<string> neighbors;\n
string source;\n
int costOfMove;\n
\n
w
hile (toVisit.Peek())\n
{\n
temp = toVisit.Dequeue();\n
source
= temp.retTo();\n
neighbors = Neighbors(source);\n
visited.Add(sou
rce);\n\n
foreach (string vertex in neighbors)\n
{\n
if
(!visited.Contains(vertex))\n
{\n
costOfMove = EdgeCo
st(source, vertex) + dist[vertices.IndexOf(source)];\n
toVisit.En
queue(vertex, source, costOfMove);\n
if (costOfMove < dist[vertic
es.IndexOf(vertex)])\n
{\n
dist[vertices.Index
Of(vertex)] = costOfMove;\n
prev[vertices.IndexOf(vertex)] =
source;\n
}\n
}\n
}\n
}\n}",
"#include <iostream>\n#include <set>\n#include <vector>\nusing n
amespace std;\n \ntypedef vector<int> vi;\ntypedef pair<int,int> ii;\ntypedef ve
ctor<ii> vii;\ntypedef vector<vii> vvii;\n \nconst int MAX = 1001;\nconst int MA
XINT = 1000000000;\n \nint n;\nvvii G(MAX);\nvi D(MAX, MAXINT);\n \nvoid Dijkstr
a(int s) {\n
set<ii> Q;\n
D[s] = 0;\n
Q.insert(ii(0,s));\n
while(!Q.
empty()) {\n
ii top = *Q.begin();\n
Q.erase(Q.begin());\n
i
nt v = top.second;\n
int d = top.first;\n\n
for (vii::const_iterat
or it = G[v].begin(); it != G[v].end(); it++) {\n
int v2 = it->first;
\n
int cost = it->second;\n
if (D[v2] > D[v] + cost) {\n

if (D[v2] != 1000000000) {\n


Q.erase(Q.find(ii(
D[v2], v2)));\n
}\n
D[v2] = D[v] + cost;\n
Q.insert(ii(D[v2], v2));\n
}\n
}\n
}\n}",
"public static void computePaths(Vertex source) {\n
source.mi
nDistance = 0.;\n
PriorityQueue<Vertex> vertexQueue = new PriorityQueue<Verte
x>();\n
vertexQueue.add(source);\n\n
while (!vertexQueue.isEmpty()) {\n
Vertex u = vertexQueue.poll();\n
// Visit each edge exiting u\n
for (Edge e : u.adjacencies) {\n
Vertex v = e.target;\n
d
ouble weight = e.weight;\n
double distanceThroughU = u.minDistance +
weight;\n
if (distanceThroughU < v.minDistance) {\n
vertexQueue.remove(v);\n
v.minDistance = distanceThroug
hU ;\n
v.previous = u;\n
vertexQueue.add(v
);\n
}\n
}\n
}\n}\n\npublic static List<Vertex> getShor
testPathTo(Vertex target) {\n
List<Vertex> path = new ArrayList<Vertex>();\n
for (Vertex vertex = target; vertex != null; vertex = vertex.previous)\n
path.add(vertex);\n
Collections.reverse(path);\n
return path;\n}"
],
"key" : 304,
"order" : "O(|V|2)",
"shortTitle" : "Dijkstra Algorithm",
"tileImage" : "images/graph/dijkstra.gif",
"title" : "Dijkstra Algorithm"
},
{ "backgroundImage" : "images/graph/belman.gif",
"description" : "Given a graph and a source vertex src in graph, find shorte
st paths from src to all vertices in the given graph. The graph may contain nega
tive weight edges.\nWe have discussed Dijkstra's algorithm for this problem. Dijkstra's algorithm is a Greedy algorithm and time complexity is O(VLogV) (with the use of Fibonacci heap). Dijkstra doesn't work for Graphs with negative weight edges,
Bellman-Ford works for such graphs. Bellman-Ford is also simpler than Dijkstra a
nd suits well for distributed systems. But time complexity of Bellman-Ford is O(VE), which is more than Dijkstra.\n\nAlgorithm\nFollowing are the detailed step
s.\n\nInput: Graph and a source vertex src\nOutput: Shortest distance to all ver
tices from src. If there is a negative weight cycle, then shortest distances are
not calculated, negative weight cycle is reported.\n\n1) This step initializes
distances from source to all vertices as infinite and distance to source itself
as 0. Create an array dist[] of size |V| with all values as infinite except dist
[src] where src is source vertex.\n\n2) This step calculates shortest distances.
Do following |V|-1 times where |V| is the number of vertices in given graph.\n..
a) Do following for each edge u-v\nIf dist[v] > dist[u] + weight of edge uv, then upda
te dist[v]\n.dist[v] = dist[u] + weight of edge uv\n\n3) This step reports if there is
a negative weight cycle in graph. Do following for each edge u-v\nIf dist[v] > dis
t[u] + weight of edge uv, then Graph contains negative weight cycle\nThe idea of s
tep 3 is, step 2 guarantees shortest distances if graph doesn't contain negative w
eight cycle. If we iterate through all edges one more time and get a shorter pat
h for any vertex, then there is a negative weight cycle\n\nHow does this work? L
ike other Dynamic Programming Problems, the algorithm calculate shortest paths i
n bottom-up manner. It first calculates the shortest distances for the shortest
paths which have at-most one edge in the path. Then, it calculates shortest path
s with at-most 2 edges, and so on. After the ith iteration of outer loop, the shortest paths with at most i edges are calculated. There can be maximum |V| - 1 edges in any simple path, that is why the outer loop runs |V| - 1 times. The idea is,
if we have calculated shortest paths with at most i edges, then an iteration ov
er all edges guarantees to give shortest path with at-most (i+1) edges\n\nComple
xity \nThe Bellman-Ford algorithm runs in time O(VE), since the initialization t
akes O(V) time, each of the |V| - 1 passes over the edges takes O(E) time and ca
lculating the distance takes O(E) times",
"group" : { "backgroundImage" : "images/graph/group_graph.png",
"description" : "Set of Basic Graph Algorithms and Some Advanced Applica
tions on Graphs. Assuming Adjacency List Representations, so All the orders are

calculated",
"groupImage" : "images/graph/graphs_header.png",
"key" : "Graphs",
"shortTitle" : "Graphs",
"title" : "Graphs"
},
"algorithms" : [ "NOT FOUND",
"void BellmanFord(int graph[][maxVertices],int cost[][maxVertice
s],int size[],int source,int vertices)\n{\n
int distance[vertices];\n
int iter,jter,from,to;\n
for(iter=0;iter<vertices;iter++)\n
{\
n
distance[iter] = INF;\n
}\n
distance[source] = 0;
\n
/* We have to repeatedly update the distance |V|-1 times where |V| rep
resents\n
number of vertices */\n
for(iter=0;iter<vertices-1;it
er++)\n
{\n
for(from=0;from<vertices;from++)\n
{\n
for(jter=0;jter<size[from];jter++)\n
{\n
to = graph[from][jter];\n
if(distance[from] + cost[from][jter] < distance[to])\n
{\n
distance
[to] = distance[from] + cost[from][jter];\n
}\n
}\n
}\n
}\n
for(iter=0;iter<v
ertices;iter++)\n
{\n
printf(\"The shortest distance to %d
is %d\\n\",iter,distance[iter]);\n
}\n\n\n}",
"public void run() {\n
distance = new int[n];\n
parent = n
ew int[n];\n
int i;\n
for (i = 0; i < n; i++)\n
distance[i] = INFIN
ITY;\n
distance[src] = 0;\n
solved = shortestPath();\n}\n\npublic boolean
shortestPath() {\n
distance[src] = 0;\n
int i, j;\n
Edge e;\n
boolea
n finished = false;\n
for (i = 0; i < n - 1 && !finished; i++) {// i is just
a counter\n
finished = true;\n
for (j = 0; j < edges.length; j++)
{\n
e = edges[j];\n
if (relax(e.to, e.from, e.weight))\n
finished = false;\n
}\n
}\n
// checking for negative
cycles, so only write them\n
// if you need to check for negative cycles\n
for (i = 0; i < edges.length; i++) {\n
if (distance[edges[i].to] > dista
nce[edges[i].from]\n
+ edges[i].weight)\n
return false
;\n
}\n
return true;\n}\n\npublic boolean relax(int u, int v, int weight)
{\n
if (distance[u] > distance[v] + weight) {\n
distance[u] = distance
[v] + weight;\n
parent[u] = v;\n
return true;\n
}\n
return f
alse;\n}"
],
"key" : 305,
    "order" : "O(|V| * |E|)",
"shortTitle" : "Bellman Ford Algorithm",
"tileImage" : "images/graph/belman.gif",
"title" : "Bellman Ford Algorithm"
},

{ "backgroundImage" : "images/dynamic/lcs_2.png",
"description" : "So we want to solve the longest common subsequence problem
by dynamic programming. To do this, we first need a recursive solution. The dyna
mic programming idea doesn't tell us how to find this, it just gives us a way of making the solution more efficient once we have.\n\nLet's start with some simpl
e observations about the LCS problem. If we have two strings, say \"nematode kno
wledge\" and \"empty bottle\", we can represent a subsequence as a way of writin
g the two so that certain letters line up:\n\n
n e m a t o d e k n o w l e
d g e\n
| | |
|
|
| |\n
e m p t y
b o t t l
e\n\nIf we draw lines connecting the letters in the first string to the corresp
onding letters in the second, no two lines cross (the top and bottom endpoints o
ccur in the same order, the order of the letters in the subsequence). Conversely
any set of lines drawn like this, without crossings, represents a subsequence.\
n\nFrom this we can observe the following simple fact: if the two strings start
with the same letter, it's always safe to choose that starting letter as the fir
st character of the subsequence. This is because, if you have some other subsequ
ence, represented as a collection of lines as drawn above, you can \"push\" the
leftmost line to the start of the two strings, without causing any other crossin
gs, and get a representation of an equally-long subsequence that does start this
way.\n\nOn the other hand, suppose that, like the example above, the two first
characters differ. Then it is not possible for both of them to be part of a comm
on subsequence - one or the other (or maybe both) will have to be removed.\n\nFi
nally, observe that once we've decided what to do with the first characters of t
he strings, the remaining subproblem is again a longest common subsequence probl
em, on two shorter strings. Therefore we can solve it recursively.\n\nRather tha
n finding the subsequence itself, it turns out to be more efficient to find the
length of the longest subsequence. Then in the case where the first characters d
iffer, we can determine which subproblem gives the correct solution by solving b
oth and taking the max of the resulting subsequence lengths. Once we turn this i
nto a dynamic program we will see how to get the sequence itself.\n\nThese obser
vations give us the following, very inefficient, recursive algorithm\n\nRecursiv
e LCS:\n\n
int lcs_length(char * A, char * B)\n
{\n
if (*A == EOF |
| *B == EOF) return 0;\n
else if (*A == *B) return 1 + lcs_length(A+1, B+
1);\n
else return max(lcs_length(A+1,B), lcs_length(A,B+1));\n
}\n\nTh
is is a correct solution but it's very time consuming. For example, if the two s
trings have no matching characters, so the last line always gets executed, then the time bounds are binomial coefficients, which (if m=n) are close to 2^n.\n\nTh
e problem with the recursive solution is that the same subproblems get called ma
ny different times. A subproblem consists of a call to lcs_length, with the argu
ments being two suffixes of A and B, so there are exactly (m+1)(n+1) possible su
bproblems (a relatively small number). If there are nearly 2^n recursive calls,
some of these subproblems must be being solved over and over.\n\nThe dynamic pro
gramming solution is to check whenever we want to solve a subproblem, whether we
've already done it before. If so we look up the solution instead of recomputing
it. Implemented in the most direct way, we just add some code to our recursive
algorithm to do this look up -- this \"top down\", recursive version of dynamic
programming is known as \"memoization\".\n\nIn the LCS problem, subproblems cons
ist of a pair of suffixes of the two input strings. To make it easier to store a
nd look up subproblem solutions, I'll represent these by the starting positions
in the strings, rather than (as I wrote it above) character pointers. \nNow to t
urn this into a dynamic programming algorithm we need only use an array to store
the subproblem results. When we want the solution to a subproblem, we first loo
k in the array, and check if there already is a solution there. If so we return
it; otherwise we perform the computation and store the result. In the LCS proble
m, no result is negative, so well use -1 as a flag to tell the algorithm that n
othing has been stored yet. \n\nTime analysis: each call to subproblem takes con
stant time. We call it once from the main routine, and at most twice every time
we fill in an entry of array L. There are (m+1)(n+1) entries, so the total numbe

r of calls is at most 2(m+1)(n+1)+1 and the time is O(mn).\n\nAs usual, this is


a worst case analysis. The time might sometimes better, if not all array entries
get filled out. For instance if the two strings match exactly, we'll only fill
in diagonal entries and the algorithm will be fast.",
"group" : { "backgroundImage" : "images/dynamic/dp_group.jpg",
"description" : "Set of Basic Algorithms related to Dynamic Programming
Concept. Illustrates this concept and give valuable examples",
"groupImage" : "images/dynamic/dynamic_header.png",
"key" : "Dynamic Programming",
"shortTitle" : "Dynamic Programming",
"title" : "Dynamic Programming"
},
"algorithms" : [ "public static int GetLCS(string str1, string str2)\n{\n
int[,] table;\n
return GetLCSInternal(str1, str2, out table);\n}\n \nprivate
static int GetLCSInternal(string str1, string str2, out int[,] matrix)\n{\n
matrix = null;\n \n
if (string.IsNullOrEmpty(str1) || string.IsNullOrEmpty(st
r2))\n
{\n
return 0;\n
}\n \n
int[,] table = new int[str1.Length
+ 1, str2.Length + 1];\n
for (int i = 0; i < table.GetLength(0); i++)\n
{
\n
table[i, 0] = 0;\n
}\n
for(int j= 0;j<table.GetLength(1); j++)\n
{\n
table[0,j] = 0;\n
}\n \n
for (int i = 1; i < table.GetLengt
h(0); i++)\n
{\n
for (int j = 1; j < table.GetLength(1); j++)\n
{\n
if (str1[i-1] == str2[j-1])\n
table[i, j] = table
[i - 1, j - 1] + 1;\n
else\n
{\n
if (table[
i, j - 1] > table[i - 1, j])\n
table[i, j] = table[i, j - 1];
\n
else\n
table[i, j] = table[i - 1, j];\n
}\n
}\n
}\n \n
matrix = table;\n
return table[str1.Lengt
h, str2.Length];\n}",
"void setAt(size_t i, size_t j, size_t value)\n{\n
data_[i +
j * (m_ + 1)] = value;\n}\n \nsize_t getAt(size_t i, size_t j) const\n{\n
ret
urn data_[i + j * (m_ + 1)];\n}\n \ntemplate<typename T> void\nbuild(const T* X,
const T* Y)\n{\n
for (size_t i=0; i<=m_; ++i)\n
setAt(i, 0, 0);\n \n
for (size_t j=0; j<=n_; ++j)\n
setAt(0, j, 0);\n \n
for (size_t i =
0; i < m_; ++i)\n
{\n
for (size_t j = 0; j < n_; ++j)\n
{\n
if (X[i] == Y[j])\n
setAt(i+1, j+1, getAt(i, j)+1);\n
else\n
setAt(i+1, j+1, std::max(getAt(i+1, j), getAt(i,
j+1)));\n
}\n
}\n}",
"public static <E> List<E> LongestCommonSubsequence(E[] s1, E[]
s2)\n{\n
int[][] num = new int[s1.length+1][s2.length+1]; //2D array, in
itialized to 0\n \n
//Actual algorithm\n
for (int i = 1; i <= s1.l
ength; i++)\n
for (int j = 1; j <= s2.length; j++)\n
if (s1[i-1].equals(s2[j-1]))\n
num[i][
j] = 1 + num[i-1][j-1];\n
else\n
num[i][j] = Math.max(num[i-1][j], num[i][j-1]);\n \n
System.out.pr
intln(\"length of LCS = \" + num[s1.length][s2.length]);\n \n
int s1posit
ion = s1.length, s2position = s2.length;\n
List<E> result = new LinkedLis
t<E>();\n \n
while (s1position != 0 && s2position != 0)\n
{\n
if (s1[s1position - 1].equals(s2[s2position - 1]))\n
{
\n
result.add(s1[s1position - 1]);\n
s1position--;\n
s2position--;\n
}\n
else if (num[s1position][s2position - 1] >= num[s1position][s2positi
on])\n
{\n
s2position--;\n
}\n
else\n
{\n
            s1position--;\n
}\n
}\n
Collections.reverse(result);\n
r
eturn result;\n}"
],
"key" : 401,
"order" : "O(N2)",
"shortTitle" : "Longest Common Subsequence",
"tileImage" : "images/dynamic/lcs_2.png",
"title" : "Longest Common Subsequence"

},
{ "backgroundImage" : "images/dynamic/cs2.png",
"description" : "Let us discuss Longest Increasing Subsequence (LIS) problem
as an example problem that can be solved using Dynamic Programming.\nThe longes
t Increasing Subsequence (LIS) problem is to find the length of the longest subs
equence of a given sequence such that all elements of the subsequence are sorted
in increasing order. For example, length of LIS for { 10, 22, 9, 33, 21, 50, 41
, 60, 80 } is 6 and LIS is {10, 22, 33, 50, 60, 80}.\n\nOptimal Substructure:\nL
et arr[0..n-1] be the input array and L(i) be the length of the LIS till index i
such that arr[i] is part of LIS and arr[i] is the last element in LIS, then L(i
) can be recursively written as.\nL(i) = { 1 + Max ( L(j) ) } where j < i and ar
r[j] < arr[i] and if there is no such j then L(i) = 1\nTo get LIS of a given arr
ay, we need to return max(L(i)) where 0 < i < n\nSo the LIS problem has optimal
substructure property as the main problem can be solved using solutions to subpr
oblems.\n\nOverlapping Subproblems:\nFollowing is simple recursive implementatio
n of the LIS problem. The implementation simply follows the recursive structure
mentioned above. The value of lis ending with every element is returned using ma
x_ending_here. The overall lis is returned using pointer to a variable max",
"group" : { "backgroundImage" : "images/dynamic/dp_group.jpg",
"description" : "Set of Basic Algorithms related to Dynamic Programming
Concept. Illustrates this concept and give valuable examples",
"groupImage" : "images/dynamic/dynamic_header.png",
"key" : "Dynamic Programming",
"shortTitle" : "Dynamic Programming",
"title" : "Dynamic Programming"
},
"algorithms" : [ "using System;\nusing System.Collections.Generic;\nusing Sy
stem.Linq;\nusing System.Text;\n \nnamespace Algorithms\n{\n
class LongestInc
reasingSequence\n
{\n
public List<int> Calculate(List<int> deck)\n
{\n
List<int> longestIncreasingSequence = new List<int>();\n
List<Stack<LinkedNode<int>>> piles = new List<Stack<LinkedNode<int>>>();\
n
for (int i = 0; i < deck.Count; i++)\n
{\n
int addToPileNumber = DeterminePileToAddNumberTo(deck[i], piles);\n
LinkedNode<int> data = new LinkedNode<int>(deck[i]);\n
if (a
ddToPileNumber == -1) //No suitable pile found. Create a new one\n
{\n
Stack<LinkedNode<int>> newStack = new Stack<LinkedNode<
int>>();\n
piles.Add(newStack);\n
newStack
.Push(data);\n
if (piles.Count > 1)\n
{\n
data.Next = piles[piles.Count - 2].Peek();\n
}\n
}\n
else\n
{\n
if (addToPileNumber > 0)\n
{\n
data.Next=piles[addToPileNumber - 1].Peek();\n
}\n
piles[addToPileNumber].Push(data);\n
}\n
}\n
System.Diagnostics.Debug.WriteLine(\"Total number of piles created wer
e {0}. Therefore the sequence size should be {0}\", piles.Count);\n
r
eturn GetSequenceFromLinkedNodes(piles[piles.Count-1].Peek());\n
}\n \n
private List<int> GetSequenceFromLinkedNodes(LinkedNode<int> LinkedNode)\n
{\n
Stack<int> largestSequenceStack = new Stack<int>();\n
largestSequenceStack.Push(LinkedNode.Data);\n
while (LinkedNo
de.Next != null)\n
{\n
LinkedNode = LinkedNode.Next;\n
largestSequenceStack.Push(LinkedNode.Data);\n
}\n
return largestSequenceStack.ToList();\n
}\n \n
private int
DeterminePileToAddNumberTo(int number, List<Stack<LinkedNode<int>>> piles)\n
{\n
return piles.FindIndex(p => p.Peek().Data > number);\n
}\n
}\n\n
class LinkedNode<T>\n
{\n
public LinkedNode()\n
{ }\n
public LinkedNode(T data)\n
{\n
this.Data = da
ta;\n
}\n
public T Data { get; set; }\n
public LinkedNode<T
> Next { get; set; }\n
}\n}",
"/* lis() returns the length of the longest increasing subsequen
ce in\n
arr[] of size n */\nint lis( int arr[], int n )\n{\n int *lis, i, j

, max = 0;\n lis = (int*) malloc ( sizeof( int ) * n );\n \n /* Initialize L


IS values for all indexes */\n for ( i = 0; i < n; i++ )\n
lis[i] = 1;\n
\n /* Compute optimized LIS values in bottom up manner */\n for ( i = 1;
i < n; i++ )\n
for ( j = 0; j < i; j++ )\n
if ( arr[i] > arr[j] &&
lis[i] < lis[j] + 1)\n
lis[i] = lis[j] + 1;\n
\n /* Pick maximum
of all LIS values */\n for ( i = 0; i < n; i++ )\n
if ( max < lis[i] )\n
max = lis[i];\n \n /* Free memory to avoid memory leak */\n free( l
is );\n \n return max;\n}",
"public static int lis_nlgn(int[] a) {\n
int[] dp = new int[a
.length];\n
int max = -1;\n
for(int x : a) {\n
int pos = Arrays.bin
arySearch(dp, 0, max + 1, x);\n
if(pos < 0) {\n
pos = -pos - 1
;\n
dp[pos] = x;\n
if(pos == max + 1)\n
max
++;\n
}\n
}\n
return max + 1;\n}"
],
"key" : 402,
"order" : "O(n log n)",
  "shortTitle" : "Longest Increasing Subsequence",
"tileImage" : "images/dynamic/cs2.png",
"title" : "Longest Increasing Subsequence"
},
{ "backgroundImage" : "images/dynamic/ks.png",
"description" : "Given a set of items, each with a weight and a value, deter
mine which items you should pick to maximize the value while keeping the overall
weight smaller than the limit of your knapsack (i.e., a backpack).\n\nFor examp
le, suppose you are a thief and you invaded a house. Inside you found the follow
ing items:\n\n
      A vase that weighs 3 pounds and is worth 50 dollars.\n
A s
ilver nugget that weighs 6 pounds and is worth 30 dollars.\n
A painting that
 weighs 4 pounds and is worth 40 dollars.\n
      A mirror that weighs 5 pounds a
nd is worth 10 dollars.\n\nWhich items should you pick?\n\nSince this is a small
problem set its not difficult to see the answer is the vase and the painting, fo
r a total value of $90, but if there were more items computing the answer wouldn't
be so easy.\n\nA brute force approach (i.e., testing all item combinations and
keeping the one with the highest value) would take 2^n, where n is the number of
 items. Once n grows slightly, this approach becomes unfeasible.\n\nWhat's a bette
r solution?\n\nWe can break the problem into smaller sub-problems (which is call
ed optimal sub-structure in computer science) and solve it recursively (i.e., di
vide and conquer).\n\nThat is, instead of thinking with all the items at the sam
e time, we think about having only one item and a certain size available in the
 knapsack. With this smaller sub-problem you'll basically need to decide between tw
o things: to take the item (in which case you get the value of the item but lose
capacity in proportion to its weight) or to not take the item (in which case yo
u don't get any value but don't lose any weight either. Once you solve this sub-pro
blem you just need to call another recursion, adjusting two things: the item you
are working with and the weight you still have available.\n\nOne problem that w
ill arise is the re-computation of sub-problems over and over again (which is ca
lled overlapping sub-problems). That is because the sub-problems are not indepen
dent. When you have this scenario (i.e., optimal sub-structure and overlapping s
ub-problems) you know that you can use the dynamic programming technique, which
 basically involves storing the solutions to each sub-problem, so that you just n
eed to compute them once.\n\nEnough talk though, let's see some code.\nTop-Down Dy
namic Programming Solution\n\nTop-down dynamic programming means that we'll use an
 intuitive and recursive algorithm to solve the problem, but instead of simply r
eturning the computed values from our function we'll first store it in an auxiliar
y table. Then every time we call the recursion we first check the table to see i
f the solution was computed already. If it was we use it, else we compute and st
ore it for future use",
"group" : { "backgroundImage" : "images/dynamic/dp_group.jpg",
"description" : "Set of Basic Algorithms related to Dynamic Programming
Concept. Illustrates this concept and give valuable examples",
"groupImage" : "images/dynamic/dynamic_header.png",

"key" : "Dynamic Programming",


"shortTitle" : "Dynamic Programming",
"title" : "Dynamic Programming"
},
"algorithms" : [ "public List<Item> FindItemsToPack(List<Item> items, int ca
pacity,out int totalValue)\n{\n \n
int[,] price = new int[items.Count + 1,
capacity + 1];\n
bool[,] keep = new bool[items.Count + 1, capacity + 1];\n\n
for (int i = 1; i <= items.Count; i++)\n
{\n
Item currentItem = ite
ms[i - 1];\n
for (int space = 1; space <= capacity; space++)\n
{\n
if (space >= currentItem.Weight)\n
{\n
int
remainingSpace = space - currentItem.Weight;\n
int remainingSpac
eValue = 0;\n
if (remainingSpace > 0)\n
{\n
remainingSpaceValue = price[i - 1, remainingSpace];\n
}\n
int currentItemTotalValue = currentItem.Price + remainingSp
aceValue;\n
if (currentItemTotalValue > price[i - 1, space])\n
{\n
keep[i, space] = true;\n
price[i, space] = currentItemTotalValue;\n
}\n
els
e\n
{\n
keep[i, space] = false;\n
price[i, space] = price[i - 1, space];\n
}\n
}\
n
}\n
}\n\n
List<Item> itemsToBePacked = new List<Item>();\n\n
i
nt remainSpace = capacity;\n
int item = items.Count;\n
while (item > 0)\n
{\n
bool toBePacked = keep[item, remainSpace];\n
if (toBePacked
)\n
{\n
itemsToBePacked.Add(items[item - 1]);\n
rem
ainSpace = remainSpace - items[item - 1].Weight;\n
}\n
item--;\n
}\n\n
totalValue = price[items.Count,capacity];\n
return itemsToBePacked
;\n}",
"#include <stdio.h>\n\n#define max(a,b) (a > b ? a : b)\n\nint m
atrix[100][100] = {0};\nint picks[100][100] = {0};\n\nint knapsack(int nItems, i
nt size, int weights[], int values[]){\n
int i,j;\n
for (i=1;i<=nItems;i++
){\n
for (j=0;j<=size;j++){\n
if (weights[i-1]<=j){\n
matrix[i][j] = max(matrix[i-1][j],values[i-1]+matrix[i-1][j-weights[i-1]]
);\n
if (values[i-1]+matrix[i-1][j-weights[i-1]]>matrix[i-1][j])\
n
picks[i][j]=1;\n
else\n
p
icks[i][j]=-1;\n
}\n
else{\n
picks[i][j] =
-1;\n
matrix[i][j] = matrix[i-1][j];\n
}\n
}\n
}\n
return matrix[nItems][size];\n}",
"public class Knapsack {\n\n
public static void main(String[]
args) {\n
int N = Integer.parseInt(args[0]); // number of items\n
int W = Integer.parseInt(args[1]); // maximum weight of knapsack\n\n
int[] profit = new int[N+1];\n
int[] weight = new int[N+1];\n\n
/
/ generate random instance, items 1..N\n
for (int n = 1; n <= N; n++) {\n
profit[n] = (int) (Math.random() * 1000);\n
weight[n] = (
int) (Math.random() * W);\n
}\n\n
// opt[n][w] = max profit of pac
king items 1..n with weight limit w\n
// sol[n][w] = does opt solution to
pack items 1..n with weight limit w include item n?\n
int[][] opt = new
int[N+1][W+1];\n
boolean[][] sol = new boolean[N+1][W+1];\n\n
for
(int n = 1; n <= N; n++) {\n
for (int w = 1; w <= W; w++) {\n\n
int option1 = opt[n-1][w];\n\n
// dont take item n\n
// take item n\n
int option2 = Integer.MIN_VALUE;\n
if (weight[n] <= w) option2 = profit[n] + opt[n-1][w-weight[n]];
\n\n
// select better of two options\n
opt[n][w] =
Math.max(option1, option2);\n
sol[n][w] = (option2 > option1);\n
}\n
}\n\n
// determine which items to take\n
bo
olean[] take = new boolean[N+1];\n
for (int n = N, w = W; n > 0; n--) {\n
if (sol[n][w]) { take[n] = true; w = w - weight[n]; }\n
else
{ take[n] = false;
}\n
}\n\n
//
print results\n
System.out.println(\"item\" + \"\t\" + \"profit\" + \"\t\
" + \"weight\" + \"\t\" + \"take\");\n
for (int n = 1; n <= N; n++) {\n
System.out.println(n + \"\t\" + profit[n] + \"\t\" + weight[n] + \"\t\
" + take[n]);\n
}\n
}\n}"

],
"key" : 403,
"order" : "O(n ^ 2)",
"shortTitle" : "Knapsack Problem",
"tileImage" : "images/dynamic/ks.png",
"title" : "Knapsack Problem"
},
{ "backgroundImage" : "images/dynamic/ks0.gif",
"description" : "0-1 knapsack problem: The setup is the same, but the items
may not be broken into smaller pieces, so thief may decide either to take an ite
m or to leave it (binary choice), but may not take a fraction of an item\n\n0-1
knapsack problem:\n
Exhibit No greedy choice property.\n
No greedy
algorithm exists.\n
Exhibit optimal substructure property.\n
Only dynamic
programming algorithm exists.\n\nDynamic-Programming Solution to the 0-1 Knapsa
ck Problem:\nLet i be the highest-numbered item in an optimal solution S for W p
ounds. Then S' = S - {i} is an optimal solution for W - wi pounds and the value
 to the solution S is Vi plus the value of the subproblem.\n\nWe can express this
fact in the following formula: define c[i, w] to be the solution for items 1,2
, . . . , i and maximum weight w. Then\n\n
0
if i = 0 or w = 0\
n
c[i,w] =
c[i-1, w]
                     if wi > w\n
                     max {vi +
 c[i-1, w-wi], c[i-1, w]}
if i>0 and w >= wi\n\nThis says that the value of
the solution to i items either include ith item, in which case it is vi plus a
subproblem solution for (i - 1) items and the weight excluding wi, or does not i
nclude ith item, in which case it is a subproblems solution for (i - 1) items a
nd the same weight. That is, if the thief picks item i, thief takes vi value, an
d thief can choose from items w - wi, and get c[i - 1, w - wi] additional value.
On other hand, if thief decides not to take item i, thief can choose from item
1,2, . . . , i- 1 upto the weight limit w, and get c[i - 1, w] value. The better
of these two choices should be made.\n\nAlthough the 0-1 knapsack problem, the
above formula for c is similar to LCS formula: boundary values are 0, and other
values are computed from the input and \"earlier\" values of c. So the 0-1 knaps
ack algorithm is like the LCS-length algorithm given in CLR for finding a longes
t common subsequence of two sequences.\n\nThe algorithm takes as input the maxim
um weight W, the number of items n, and the two sequences v = <v1, v2, . . . , v
n> and w = <w1, w2, . . . , wn>. It stores the c[i, j] values in the table, that
is, a two dimensional array, c[0 . . n, 0 . . w] whose entries are computed in
a row-major order. That is, the first row of c is filled in from left to right,
then the second row, and so on. At the end of the computation, c[n, w] contains
the maximum value that can be picked into the knapsack.\n\nAnalysis\n\nThis dyna
mic-0-1-knapsack algorithm takes O(nw) time, broken up as follows: O(nw) time to
 fill the c-table, which has (n +1).(w +1) entries, each requiring O(1) time to co
mpute. O(n) time to trace the solution, because the tracing process starts in ro
w n of the table and moves up 1 row at each step.",
"group" : { "backgroundImage" : "images/dynamic/dp_group.jpg",
"description" : "Set of Basic Algorithms related to Dynamic Programming
Concept. Illustrates this concept and give valuable examples",
"groupImage" : "images/dynamic/dynamic_header.png",
"key" : "Dynamic Programming",
"shortTitle" : "Dynamic Programming",
"title" : "Dynamic Programming"
},
"algorithms" : [ "public static void Run()\n{\n
var n = I.Length;
\n
for (var i = 1; i <= n; i++)\n
{\n
for (var j = 0; j <= W; j+
+)\n
{\n
if (I[i - 1].w <= j)\n
{\n
M[i][j] = Max(M[i - 1][j], I[i - 1].v + M[i - 1][j - I[i - 1].w]);\n
if (I[i - 1].v + M[i - 1][j - I[i - 1].w] > M[i - 1][j])\n
{\
n
P[i][j] = 1;\n
}
\n
else\n
{\n
P[i][j] = -1;\n
}\n
}\n
else\n
{\n
P[i][j] = -1;\n
M[i][j] = M[i - 1][j];\n
}\n
}

\n

}\n

MaxValue = M[n][W];\n}",
"#include<iostream>\n#include<stdio.h>\n\nvoid knapsack(int n,in
t W);\nint n,i,w,W;\nint weight[50],v[50];\nint C[50][50];\n\nvoid knapsack(int
n,int W)\n{\n
for(i = 1; i <= n; i++){\n
C[i][0] = 0;\n
}\n
for(
i=1;i<=n;i++)\n
{\n
for(w=0;w<=W;w++)\n
if(weight[i]<=w)
//item can be put in knapsack\n
if(v[i]+C
[i-1][w-weight[i]]>C[i-1][w])\n
C[i][w]=v[i]+C[i-1][w-weight[i]];\n
else\n
C[i][w]=C[i-1][w];\n
else\n
C[
i][w]=C[i-1][w];
// w[i]>w\n
}\n
    std::cout<<C[i][w];\n}",
"public class Knapsack {\n
int[] benefit;\n
int[] weight;\
n
int N;\n
char[][] path;\n\n
int ks(int maxWeight) {\n
int[][]
best = new int[N + 1][maxWeight + 1];\n
for (int i = 1; i <= N; i++)\n
for (int w = 1; w <= maxWeight; w++) {\n
                if (weight[i - 1] > w) {\n
best[i][w] = best[i - 1][w];\n
path[i][w] = 'd';\n
} else {\n
int a = best[i
- 1][w];\n
int b = best[i - 1][w - weight[i - 1]] + benefit[
i - 1];\n
if (a > b) {\n
best[i][w] =
a;\n
path[i][w] = 'd';\n
} else {\n
best[i][w] = b;\n
path[i][w] = 'b';\
n
}\n
}\n
}\n
return best[N]
[maxWeight];\n
}\n\n
void print(int i, int j, int maxWeight) {\n
if
(i == 0)\n
return;\n
if (path[i][j] == 'd') {\n
pr
int(i - 1, j, maxWeight);\n
} else if (path[i][j] == 'b') {\n
print(i - 1, maxWeight - weight[i - 1], maxWeight\n
- weight[
i - 1]);\n
System.out.print(weight[i - 1] + \" \");\n
}\n
return;\n
}\n}"
],
"key" : 404,
"order" : "O(n w)",
"shortTitle" : "0-1 Knapsack Problem",
"tileImage" : "images/dynamic/ks0.gif",
"title" : "0-1 Knapsack Problem"
},

{ "backgroundImage" : "images/strings/z.png",
"description" : "Given a string S of length n, the Z Algorithm produces an a
rray Z where Z[i] is the length of the longest substring starting from S[i] whic
h is also a prefix of S, i.e. the maximum k such that S[j] = S[i + j] for all 0
<= j < k. Note that Z[i] = 0 means that S[0] != S[i]. For easier terminology, we
will refer to substrings which are also a prefix as prefix-substrings.\n\nThe a
lgorithm relies on a single, crucial invariant. As we iterate over the letters i
n the string (index i from 1 to n - 1), we maintain an interval [L, R] which is
the interval with maximum R such that 1 <= L <= i <= R and S[L...R] is a prefixsubstring (if no such interval exists, just let L = R = - 1). For i =1, we can s
imply compute L and R by comparing S[0...] to S[1...]. Moreover, we also get Z[1
] during this.\n\nNow suppose we have the correct interval [L, R] for i - 1 and
all of the Z values up to i - 1. We will compute Z[i] and the new [L, R] by the
following steps:\n\n
If i > R, then there does not exist a prefix-substring o
f S that starts before i and ends at or after i. If such a substring existed, [L

, R] would have been the interval for that substring rather than its current val
ue. Thus we \"reset\" and compute a new [L, R] by comparing S[0...] to S[i...] a
nd get Z[i] at the same time (Z[i] = R - L + 1).\n
Otherwise, i <= R, so the
current [L, R] extends at least to i. Let k = i - L. We know that Z[i] >= min(Z[
k], R - i + 1) because S[i...] matches S[k...] for at least R - i + 1 characters
(they are in the [L, R] interval which we know to be a prefix-substring). Now w
e have a few more cases to consider.\n
If Z[k] < R - i + 1, then there is no
longer prefix-substring starting at S[i] (or else Z[k] would be larger), meaning
Z[i] = Z[k] and [L, R] stays the same. The latter is true because [L, R] only c
hanges if there is a prefix-substring starting at S[i] that extends beyond R, wh
ich we know is not the case here.\n
If Z[k] >= R - i + 1, then it is possible
for S[i...] to match S[0...] for more than R - i + 1 characters (i.e. past posi
tion R). Thus we need to update [L, R] by setting L = i and matching from S[R +
1] forward to obtain the new R. Again, we get Z[i] during this.\n\nThe process c
omputes all of the Z values in a single pass over the string, so we're done. Cor
rectness is inherent in the algorithm and is pretty intuitively clear.\n\nAnalys
is\nWe claim that the algorithm runs in O(n) time, and the argument is straightf
orward. We never compare characters at positions less than R, and every time we
match a character R increases by one, so there are at most n comparisons there.
Lastly, we can only mismatch once for each i (it causes R to stop increasing), s
o that's another at most n comparisons, giving O(n) total.",
"group" : { "backgroundImage" : "images/strings/string_group.gif",
"description" : "Set of Basic Algorithms related to string processing an
d matching",
"groupImage" : "images/strings/strings_header.png",
"key" : "String Processing",
"shortTitle" : "String Processing",
"title" : "String Processing"
},
"algorithms" : [ "NOT FOUND",
"int L = 0, R = 0;\nfor (int i = 1; i < n; i++) {\n if (i > R)
{\n
L = R = i;\n
while (R < n && s[R-L] == s[R]) R++;\n
z[i] = R-L; R--;\n  } else {\n
int k = i-L;\n
if (z[k] < R-i+1) z[i] = z[k];\n
else {\
n
L = i;\n
while (R < n && s[R-L] == s[R]) R++;\n
      z[i] = R-L; R--;\n
}\n }\n}\n\nint maxz = 0, res = 0;\nfor (int i = 1; i < n; i++) {\n if
(z[i] == n-i && maxz >= n-i) { res = n-i; break; }\n maxz = max(maxz, z[i]);\n}
",
"public int findPrefixLength (char [] text, int a, int b)\n{\n
int len = 0;\n
for(int i = 0; i + a < text.length && i + b < text.length; i
++)\n
{\n
if (text[i + a] == text[i + b]) len++;\n
else break;\
n
}\n
return len;\n}\n \npublic void calcZbox (String word)\n{\n
text
= word.toCharArray ();\n
z = new int [text.length];\n
int l=0;\n
int r
=0;\n \n
if (z.length > 1) z[1] = findPrefixLength (text, 0, 1);\n
else
return;\n \n
if (z[1] > 0)\n
{\n
l = 1;\n
r = z[1];\n
}
\n \n
for (int j = 2; j < text.length; j++)\n
{\n
if (j > r) //ca
se 1\n
{\n
z[j] = findPrefixLength (text, 0, j);\n
if (z[j] > 0)\n
{\n
l = j;\n
r = j + z[
j] - 1;\n
}\n
} \n
else //case 2\n
{\n
int k = j - l;\n
if (z[k] < r - j + 1) //case 2a\n
{\n
z[j] = z[k];\n
}\n
else //case 2b\n
{\n
int p = findPrefixLength (text, r - j + 1, r + 1);\n
z[j] = r - j + 1 + p;\n
l = j;\n
r = j
+ z[j] - 1;\n
}\n
}\n
}\n}"
],
"key" : 501,
"order" : "O(|S|)",
"shortTitle" : "Z Algorithm",
"tileImage" : "images/strings/z.png",
"title" : "Z Algorithm"
}

You might also like