-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdata.js
More file actions
12 lines (10 loc) · 262 KB
/
data.js
File metadata and controls
12 lines (10 loc) · 262 KB
1
2
3
4
5
6
7
8
9
10
11
12
//import utils from './utils';
//
//module.exports = utils.readFile('./parsed.json', function (json) {
// console.log('parsed json!!!!!!!!', json)
//});
// Intentional no-op placeholder for a JSON-loading step (see the
// commented-out utils.readFile sketch above). Kept as a callable stub
// so any existing caller invoking parse() continues to work; it takes
// no arguments and returns undefined.
// NOTE(review): if async file parsing is ever wired in, this should
// return a Promise — confirm against callers before changing.
const parse = function () {
};
module.exports = {
cards: [{"answer": "For simplicity, assume char set is ASCII (if not, we need to increase the storage size. The rest \nof the logic would be the same). \nNOTE\n: This is a \ngreat\n thing to point out to your interviewer!\n1 public static boolean isUniqueChars2(String str) {\n2 boolean[] char_set = new boolean[256];\n3 for (int i = 0; i < str.length(); i++) {\n4 int val = str.charAt(i);\n5 if (char_set[val]) return false;\n6 char_set[val] = true;\n7 }\n8 return true;\n9 }\nTime complexity is O(n), where n is the length of the string, and space complexity is O(n).\nWe can reduce our space usage a little bit by using a \nbit vector. We will assume, in the below \ncode, that the string is only lower case \u201aa\u2019 through \u201az\u2019. This will allow us to use just a single int\n1 public static boolean isUniqueChars(String str) {\n2 int checker = 0;\n3 for (int i = 0; i < str.length(); ++i) {\n4 int val = str.charAt(i) - \u201aa\u2019;\n5 if ((checker & (1 << val)) > 0) return false;\n6 checker \n7 }\n8 return true;\n9 }\nAlternatively, we could do the following:\n1. Check every char of the string with every other char of the string for duplicate occur-rences. This will take \nO(n^2) time and no space.\n2. If we are allowed to destroy the input string, we could sort the string in O(n log n) time \nand then linearly check the string for neighboring characters that are identical. Care-ful, though - many sorting algorithms take up extra space.", "question": "Implement an algorithm to determine if a \nstring has all unique characters. What if \nyou can not use additional data structures?", "id": 0}, {"answer": "This is a classic interview question. 
The only \u201cgotcha\u201d is to try to do it in place, and to be care-ful for the null character.\n1 void reverse(char *str) {\n2 char * end = str;\n3 char tmp;\n4 if (str) {\n5 while (*end) {\n6 ++end;\n7 }\n8 --end;\n9 while (str < end) {\n10 tmp = *str;\n11 *str++ = *end;\n12 *end-- = tmp;\n13 }\n14 }\n15 }", "question": "Write code to reverse a C-Style \nString. (C-String means that \u201cabcd\u201d is represented as \nfive characters, including the null character.)", "id": 2}, {"answer": "First, ask yourself, what does the interviewer mean by an additional buffer? Can we use an \nadditional array of constant size? \nAlgorithm\u2014No (Large) Additional Memory:\n1. For each character, check if it is a duplicate of already found characters.\n2. Skip duplicate characters and update the non duplicate characters.\nTime complexity is \nO(N2\n).\n1 public static void removeDuplicates(char[] str) {\n2 if (str == null) return;\n3 int len = str.length;\n4 if (len < 2) return;\n5 \n6 int tail = 1;\n7 \n8 for (int i = 1; i < len; ++i) {\n9 int j;\n10 for (j = 0; j < tail; ++j) {\n11 if (str[i] == str[j]) break;\n12 }\n13 if (j == tail) {\n14 str[tail] = str[i];\n15 ++tail;\n16 }\n17 }\n18 str[tail] = 0;\n19 }\nTest Cases:\n1. String does not contain any duplicates, e.g.: abcd\n2. String contains all duplicates, e.g.: aaaa\n3. Null string\n4. String with all continuous duplicates, e.g.: aaabbb\n\n5. String with non-contiguous duplicate, e.g.: abababa\nAlgorithm\u2014With Additional Memory of Constant Size \n1 public static void removeDuplicatesEff(char[] str) {\n2 if (str == null) return;\n3 int len = str.length;\n4 if (len < 2) return;\n5 boolean[] hit = new boolean[256];\n6 for (int i = 0; i < 256; ++i) {\n7 hit[i] = false;\n8 }\n9 hit[str[0]] = true;\n10 int tail = 1;\n11 for (int i = 1; i < len; ++i) {\n12 if (!hit[str[i]]) {\n13 str[tail] = str[i];\n14 ++tail;\n15 hit[str[i]] = true;\n16 }\n17 }\n18 str[tail] = 0;\n19 }\n\n1. 
String does not contain any duplicates, e.g.: abcd\n2. String contains all duplicates, e.g.: aaaa\n3. Null string\n4. Empty string\n5. String with all continuous duplicates, e.g.: aaabbb\n6. String with non-contiguous duplicates, e.g.: abababa", "question": "Design an algorithm and write code to remove the duplicate characters in a \nstring \nwithout using any additional buffer. NOTE: One or two additional variables are fine. \nAn extra copy of the array is not.\nFOLLOW UP\nWrite the \ntest cases for this method.", "id": 4}, {"answer": "There are two easy ways to solve this problem:\nSolution #1: Sort the strings\n1 boolean anagram(String s, String t) {\n2 return sort(s) == sort(t);\n3 }\nSolution #2: Check if the two strings have identical counts for each unique char.\n1 public static boolean anagram(String s, String t) {\n2 if (s.length() != t.length()) return false;\n3 int[] letters = new int[256];\n4 int num_unique_chars = 0;\n5 int num_completed_t = 0;\n6 char[] s_array = s.toCharArray();\n7 for (char c : s_array) { // count number of each char in s.\n8 if (letters[c] == 0) ++num_unique_chars;\n9 ++letters[c];\n10 }\n11 for (int i = 0; i < t.length(); ++i) {\n12 int c = (int) t.charAt(i);\n13 if (letters[c] == 0) { // Found more of char c in t than in s.\n14 return false;\n15 }\n16 --letters[c];\n17 if (letters[c] == 0) {\n18 ++num_completed_t;\n19 if (num_completed_t == num_unique_chars) {\n20 // it\u2019s a match if t has been processed completely\n21 return i == t.length() - 1;\n22 }\n23 }\n24 }\n25 return false;\n26 }", "question": "Write a method to decide if two \nstrings are anagrams or not.", "id": 6}, {"answer": "The algorithm is as follows:\n1. Count the number of spaces during the first scan of the string.\n2. Parse the string again from the end and for each character:\n \n\u00bb If a space is encountered, store \u201c%20\u201d. 
\n \n\u00bb Else, store the character as it is in the newly shifted location.\n \n1 public static void ReplaceFun(char[] str, int length) {\n2 int spaceCount = 0, newLength, i = 0;\n3 for (i = 0; i < length; i++) {\n4 if (str[i] == \u201a \u201a) {\n5 spaceCount++;\n6 }\n7 }\n8 newLength = length + spaceCount * 2;\n9 \n\n10 for (i = length - 1; i >= 0; i--) {\n11 if (str[i] == \u201a \u201a) {\n12 str[newLength - 1] = \u201a0\u2019;\n13 str[newLength - 2] = \u201a2\u2019;\n14 str[newLength - 3] = \u201a%\u2019;\n15 newLength = newLength - 3;\n16 } else {\n17 str[newLength - 1] = str[i];\n18 newLength = newLength - 1;\n19 }\n20 }\n21 }", "question": "Write a method to replace all spaces in a \nstring with \u201a%20\u2019.", "id": 8}, {"answer": "The rotation can be performed in layers, where you perform a cyclic swap on the edges on \neach layer. In the first for loop, we rotate the first layer (outermost edges). We rotate the \nedges by doing a four-way swap first on the corners, then on the element clockwise from the \nedges, then on the element three steps away.\nOnce the exterior elements are rotated, we then rotate the interior region\u2019s edges.\n1 public static void rotate(int[][] matrix, int n) {\n2 for (int layer = 0; layer < n / 2; ++layer) {\n3 \n4 int last = n - 1 - layer;\n5 \n6 \n7 \n8 // left -> top\n9 \n10 \n11 // bottom -> left\n12 \n13 \n14 // right -> bottom\n15 matrix[last][last - offset] = matrix[i][last]; \n16 \n17 // top -> right\n18 matrix[i][last] = top; // right <- saved top\n19 }\n20 }\n21 }", "question": "Given an image represented by an NxN \nmatrix, where each pixel in the image is 4 \nbytes, write a method to rotate the image by 90 degrees. Can you do this in place?", "id": 10}, {"answer": "At first glance, this problem seems easy: just iterate through the matrix and every time we \nsee a 0, set that row and column to 0. 
There\u2019s one problem with that solution though: we \nwill \u201crecognize\u201d those 0s later on in our iteration and then set their row and column to zero. \nPretty soon, our entire matrix is 0s!\nOne way around this is to keep a second matrix which flags the 0 locations. We would then \ndo a second pass through the matrix to set the zeros. This would take O(MN) space.\nDo we really need \nO(MN) space? No. Since we\u2019re going to set the entire row and column to \nzero, do we really need to track \nwhich \ncell in a row is zero? No. We only need to know that \nrow 2, for example, has a zero. \nThe code below implement this algorithm. We keep track in two \narrays all the rows with \nzeros and all the columns with zeros. We then make a second pass of the matrix and set a cell \nto zero if its row or column is zero.\n1 public static void setZeros(int[][] matrix) {\n2 int[] row = new int[matrix.length]; \n3 int[] column = new int[matrix[0].length];\n4 // Store the row and column index with value 0\n5 for (int i = 0; i < matrix.length; i++) {\n6 for (int j = 0; j < matrix[0].length;j++) {\n7 if (matrix[i][j] == 0) {\n8 row[i] = 1; \n9 column[j] = 1;\n10 }\n11 }\n12 }\n13 \n14 // Set arr[i][j] to 0 if either row i or column j has a 0\n15 for (int i = 0; i < matrix.length; i++) {\n16 for (int j = 0; j < matrix[0].length; j++) {\n17 if ((row[i] == 1 \n18 matrix[i][j] = 0;\n19 }\n20 }\n21 }\n22 }", "question": "Write an algorithm such that if an element in an MxN \nmatrix is 0, its entire row and \ncolumn is set to 0.", "id": 12}, {"answer": "Just do the following checks\n1. Check if length(s1) == length(s2). If not, return false.\n2. Else, concatenate s1 with itself and see whether s2 is substring of the result. 
\n input: s1 = apple, s2 = pleap ==> apple is a substring of pleappleap \n input: s1 = apple, s2 = ppale ==> apple is not a substring of ppaleppale\n1 public static boolean isRotation(String s1, String s2) {\n2 int len = s1.length();\n3 /* check that s1 and s2 are equal length and not empty */\n4 if (len == s2.length() && len > 0) { \n5 /* concatenate s1 and s1 within new buffer */\n6 String s1s1 = s1 + s1;\n7 return isSubstring(s1s1, s2);\n8 }\n9 return false;\n10 }", "question": "Assume you have a method isSubstring which checks if one word is a substring of \nanother. Given two \nstrings, s1 and s2, write code to check if s2 is a rotation of s1 using \nonly one call to isSubstring (i.e., \u201cwaterbottle\u201d is a rotation of \u201cerbottlewat\u201d).", "id": 14}, {"answer": "If we can use a buffer, we can keep track of elements in a hashtable and remove any dups:\n1 public static void deleteDups(LinkedListNode n) {\n2 Hashtable table = new Hashtable();\n3 LinkedListNode previous = null;\n4 while (n != null) {\n5 if (table.containsKey(n.data)) previous.next = n.next;\n6 else {\n7 table.put(n.data, true);\n8 previous = n;\n9 }\n10 n = n.next;\n11 }\n12 }\nWithout a buffer, we can iterate with two pointers: \u201ccurrent\u201d does a normal iteration, while \n\u201crunner\u201d iterates through all prior nodes to check for dups. 
Runner will only see one dup \nper node, because if there were multiple duplicates they would have been removed already.\n1 public static void deleteDups2(LinkedListNode head) {\n2 if (head == null) return;\n3 LinkedListNode previous = head;\n4 LinkedListNode current = previous.next;\n5 while (current != null) {\n6 LinkedListNode runner = head;\n7 while (runner != current) { // Check for earlier dups\n8 if (runner.data == current.data) {\n9 LinkedListNode tmp = current.next; // remove current\n10 previous.next = tmp; \n11 current = tmp; // update current to next node\n12 break; // all other dups have already been removed\n13 }\n14 runner = runner.next;\n15 }\n16 if (runner == current) { // current not updated - update now\n17 previous = current;\n18 current = current.next;\n19 }\n20 }\n21 }", "question": "Write code to remove duplicates from an unsorted \nlinked list.\nFOLLOW UP\nHow would you solve this problem if a temporary buffer is not allowed?", "id": 16}, {"answer": "Note: This problem screams \nrecursion, but we\u2019ll do it a difierent way because it\u2019s \ntrickier. In a question like this, expect follow up questions about the advantages \nof recursion vs iteration.\nAssumption: The minimum number of nodes in list is \nn\n.\nAlgorithm:\n1. Create two pointers, p1 and p2, that point to the beginning of the node.\n2. Increment p2 by n-1 positions, to make it point to the nth node from the beginning (to \nmake the distance of n between p1 and p2).\n3. Check for p2->next == null if yes return value of \np1\n, otherwise increment \np1\n and \np2\n. \nIf next of \np2\n is null it means \np1\n points to the nth node from the last as the distance \nbetween the two is \nn\n.\n4. 
Repeat Step 3.\n \n1 LinkedListNode nthToLast(LinkedListNode head, int n) {\n2 if (head == null \n3 return null;\n4 }\n5 LinkedListNode p1 = head;\n6 LinkedListNode p2 = head;\n7 for (int j = 0; j < n - 1; ++j) { // skip n-1 steps ahead\n8 if (p2 == null) {\n9 return null; // not found since list size < n\n10 }\n11 p2 = p2.next;\n12 }\n13 while (p2.next != null) {\n14 p1 = p1.next;\n15 p2 = p2.next;\n16 }\n17 return p1;\n18 }", "question": "Implement an algorithm to find the nth to last element of a singly \nlinked list.", "id": 18}, {"answer": "The solution to this is to simply copy the data from the next node into this node and then \ndelete the next node.\nNOTE: This problem can not be solved if the node to be deleted is the last node \nin the linked list. That\u2019s ok\u2014your interviewer wants to see you point that out. You \ncould consider marking it as dummy in that case. This is an issue you should dis-cuss with your interviewer.\n1 public static boolean deleteNode(LinkedListNode n) {\n2 if (n == null \n3 return false; // Failure\n4 } \n5 LinkedListNode next = n.next; \n6 n.data = next.data; \n7 n.next = next.next; \n8 return true;\n9 }", "question": "Implement an algorithm to delete a node in the middle of a single \nlinked list, given \nonly access to that node.\nEXAMPLE\nInput: the node \u201ac\u2019 from the linked list a->b->c->d->e\nResult: nothing is returned, but the new linked list looks like a->b->d->e", "id": 20}, {"answer": "We can implement this recursively by adding node by node, just as we would digit by digit.\n1. result.data = (node1 + node2 + any earlier carry) % 10\n2. if node1 + node2 > 10, then carry a 1 to the next addition.\n3. 
add the tails of the two nodes, passing along the carry.\n1 LinkedListNode addLists(LinkedListNode l1, LinkedListNode l2, \n2 int carry) {\n3 if (l1 == null && l2 == null) {\n4 return null;\n5 }\n6 LinkedListNode result = new LinkedListNode(carry, null, null);\n7 int value = carry;\n8 if (l1 != null) {\n9 value += l1.data;\n10 }\n11 if (l2 != null) {\n12 value += l2.data;\n13 }\n14 result.data = value % 10;\n15 LinkedListNode more = addLists(l1 == null ? null : l1.next, \n16 l2 == null ? null : l2.next, \n17 value > 10 ? 1 : 1);\n18 result.setNext(more);\n19 return result;\n20 }", "question": "You have two numbers represented by a \nlinked list, where each node contains a sin-gle digit. The digits are stored in reverse order, such that the 1\u2019s digit is at the head of \nthe list. Write a function that \nadds the two numbers and returns the sum as a linked \nlist.\nEXAMPLE\nInput: (3 -> 1 -> 5), (5 -> 9 -> 2)\nOutput: 8 -> 0 -> 8", "id": 22}, {"answer": "If we move two pointers, one with speed 1 and another with speed 2, they will end up meet-ing if the linked list has a loop. Why? Think about two cars driving on a track\u2014the faster car \nwill always pass the slower one!\nThe tricky part here is finding the start of the loop. Imagine, as an analogy, two people rac-ing around a track, one running twice as fast as the other. If they start off at the same place, \nwhen will they next meet? They will next meet at the start of the next lap.\nNow, let\u2019s suppose Fast Runner had a head start of k meters on an n step lap. When will \nthey next meet? They will meet k meters before the start of the next lap. (Why? Fast Runner \nwould have made k + 2(n - k) steps, including its head start, and Slow Runner would have \nmade n - k steps. Both will be k steps before the start of the loop.)\nNow, going back to the problem, when Fast Runner (n2) and Slow Runner (n1) are moving \naround our circular linked list, n2 will have a head start on the loop when n1 enters. 
Specifi-cally, it will have a head start of k, where k is the number of nodes before the loop. Since n2 \nhas a head start of k nodes, n1 and n2 will meet k nodes before the start of the loop.\nSo, we now know the following:\n1. Head is k nodes from LoopStart (by definition).\n2. MeetingPoint for n1 and n2 is k nodes from LoopStart (as shown above).\nThus, if we move n1 back to Head and keep n2 at MeetingPoint, and move them both at the \nsame pace, they will meet at LoopStart.\n\n1 LinkedListNode FindBeginning(LinkedListNode head) {\n2 LinkedListNode n1 = head;\n3 LinkedListNode n2 = head; \n4 \n5 // Find meeting point\n6 while (n2.next != null) { \n7 n1 = n1.next; \n8 n2 = n2.next.next; \n9 if (n1 == n2) { \n10 break; \n11 }\n12 }\n13 \n14 // Error check - there is no meeting point, and therefore no loop\n15 if (n2.next == null) {\n16 return null;\n17 }\n18 \n19 /* Move n1 to Head. Keep n2 at Meeting Point. Each are k steps\n20 /* from the Loop Start. If they move at the same pace, they must\n21 * meet at Loop Start. 
*/\n22 n1 = head; \n23 while (n1 != n2) { \n24 n1 = n1.next; \n25 n2 = n2.next; \n26 }\n27 // Now n2 points to the start of the loop.\n28 return n2;\n29 }\nn1 and n2 will meet here, 3 \nnodes from start of loop", "question": "Given a circular \nlinked list, implement an algorithm which returns node at the begin-ning of the loop.\nDEFINITION\nCircular linked list: A (corrupt) linked list in which a node\u2019s next pointer points to an \nearlier node, so as to make a loop in the linked list.\nEXAMPLE\nInput: A -> B -> C -> D -> E -> C [the same C as earlier]\nOutput: C", "id": 24}, {"answer": "Approach 1:\n \nDivide the array in three equal parts and allow the individual stack to grow in that limited \nspace (note: \u201c[\u201c means inclusive, while \u201c(\u201c means exclusive of the end point).\n \n\u00bb for stack 1, we will use [0, n/3)\n \n\u00bb for stack 2, we will use [n/3, 2n/3)\n \n\u00bb for stack 3, we will use [2n/3, n)\nThis solution is based on the assumption that we do not have any extra information about \nthe usage of space by individual stacks and that we can\u2019t either modify or use any extra \nspace. 
With these constraints, we are left with no other choice but to divide equally.\n1 int stackSize = 300;\n2 int[] buffer = new int [stackSize * 3];\n3 int[] stackPointer = {0, 0, 0}; // stack pointers to track top elem\n4 \n5 void push(int stackNum, int value) {\n6 /* Find the index of the top element in the array + 1, and\n7 * increment the stack pointer */\n8 int index = stackNum * stackSize + stackPointer[stackNum] + 1;\n9 stackPointer[stackNum]++;\n10 buffer[index] = value; \n11 }\n12 \n13 int pop(int stackNum) {\n14 int index = stackNum * stackSize + stackPointer[stackNum];\n15 stackPointer[stackNum]--;\n16 int value = buffer[index];\n17 buffer[index]=0;\n18 return value;\n19 }\n20 \n21 int peek(int stackNum) {\n22 int index = stackNum * stackSize + stackPointer[stackNum];\n23 return buffer[index];\n24 }\n25 \n26 boolean isEmpty(int stackNum) {\n27 return stackPointer[stackNum] == stackNum*stackSize;\n28 }\n\nApproach 2: \nIn this approach, any stack can grow as long as there is any free space in the array.\nWe sequentially allocate space to the stacks and we link new blocks to the previous block. \nThis means any new element in a stack keeps a pointer to the previous top element of that \nparticular stack.\nIn this implementation, we face a problem of unused space. For example, if a stack deletes \nsome of its elements, the deleted elements may not necessarily appear at the end of the ar-ray. So, in that case, we would not be able to use those newly freed spaces.\nTo overcome this deficiency, we can maintain a free list and the whole array space would be \ngiven initially to the free list. For every insertion, we would delete an entry from the free list. 
\nIn case of deletion, we would simply add the index of the free cell to the free list.\nIn this implementation we would be able to have flexibility in terms of variable space utiliza-tion but we would need to increase the space complexity.\n1 int stackSize = 300;\n2 int indexUsed = 0;\n3 int[] stackPointer = {-1,-1,-1};\n4 StackNode[] buffer = new StackNode[stackSize * 3];\n5 void push(int stackNum, int value) {\n6 int lastIndex = stackPointer[stackNum];\n7 stackPointer[stackNum] = indexUsed;\n8 indexUsed++;\n9 buffer[stackPointer[stackNum]]=new StackNode(lastIndex,value);\n10 }\n11 int pop(int stackNum) {\n12 int value = buffer[stackPointer[stackNum]].value;\n13 int lastIndex = stackPointer[stackNum];\n14 stackPointer[stackNum] = buffer[stackPointer[stackNum]].previous;\n15 buffer[lastIndex] = null;\n16 indexUsed--;\n17 return value;\n18 }\n19 int peek(int stack) { return buffer[stackPointer[stack]].value; }\n20 boolean isEmpty(int stackNum) { return stackPointer[stackNum] == -1; }\n21 \n22 class StackNode {\n23 public int previous;\n24 public int value;\n25 public StackNode(int p, int v){\n26 value = v;\n27 previous = p;\n28 }\n29 }", "question": "Describe how you could use a single \narray to implement three \nstacks.", "id": 26}, {"answer": "You can implement this by having each node in the \nstack keep track of the minimum be-neath itself. Then, to find the min, you just look at what the top element thinks is the min. \nWhen you push an element onto the stack, the element is given the current minimum. 
It sets \nits \u201clocal min\u201d to be the min\n.\n1 public class StackWithMin extends Stack<NodeWithMin> {\n2 public void push(int value) {\n3 int newMin = Math.min(value, min());\n4 super.push(new NodeWithMin(value, newMin));\n5 }\n6 \n7 public int min() {\n8 if (this.isEmpty()) {\n9 return Integer.MAX_VALUE;\n10 } else {\n11 return peek().min;\n12 }\n13 }\n14 }\n15 \n16 class NodeWithMin {\n17 public int value;\n18 public int min;\n19 public NodeWithMin(int v, int min){\n20 value = v;\n21 this.min = min;\n22 }\n23 }\nThere\u2019s just one issue with this: if we have a large stack, we waste a lot of space by keeping \ntrack of the min for every single element. Can we do better?\nWe can (maybe) do a bit better than this by using an additional stack which keeps track of \nthe mins.\n1 public class StackWithMin2 extends Stack<Integer> {\n2 Stack<Integer> s2;\n3 public StackWithMin2() {\n4 s2 = new Stack<Integer>(); \n\n5 }\n6 public void push(int value){\n7 if (value <= min()) {\n8 s2.push(value);\n9 }\n10 super.push(value);\n11 }\n12 public Integer pop() {\n13 int value = super.pop();\n14 if (value == min()) {\n15 s2.pop(); \n16 }\n17 return value;\n18 }\n19 public int min() {\n20 if (s2.isEmpty()) {\n21 return Integer.MAX_VALUE;\n22 } else {\n23 return s2.peek();\n24 }\n25 }\n26 }\nWhy might this be more space e\u02ddcient? If many elements have the same local min, then \nwe\u2019re keeping a lot of duplicate data. By having the mins kept in a separate stack, we don\u2019t \nhave this duplicate data (although we do use up a lot of extra space because we have a stack \nnode instead of a single int).", "question": "How would you design a \nstack which, in addition to push and pop, also has a function \nmin which returns the minimum element? 
Push, pop and min should all operate in \nO(1) time.", "id": 28}, {"answer": "In this problem, we\u2019ve been told what our data structure should look like:\n1 class SetOfStacks {\n2 ArrayList<Stack> stacks = new ArrayList<Stack>();\n3 public void push(int v) { ... }\n4 public int pop() { ... }\n5 }\nWe know that push() should behave identically to a single stack, which means that we need \npush() to call push on the last stack. We have to be a bit careful here though: if the last stack \nis at capacity, we need to create a new stack. Our code should look something like this:\n1 public void push(int v) {\n2 Stack last = getLastStack();\n3 if (last != null && !last.isAtCapacity()) { // add to last stack\n4 last.push(v);\n5 } else { // must create new stack\n6 Stack stack = new Stack(capacity);\n7 stack.push(v);\n8 stacks.add(stack);\n9 }\n10 }\nWhat should pop() do? It should behave similarly to push(), in that it should operate on the \nlast stack. If the last stack is empty (after popping), then we should remove it from the list \nof stacks.\n1 public int pop() {\n2 Stack last = getLastStack();\n3 int v = last.pop();\n4 if (last.size == 0) stacks.remove(stacks.size() - 1);\n5 return v;\n6 }\n\nWhat about the follow up question? This is a bit trickier to implement, but essentially we \nshould imagine a \u201crollover\u201d system. If we pop an element from stack 1, we need to remove \nthe \nbottom\n of stack 2 and push it onto stack 1. We then need to rollover from stack 3 to stack \n2, stack 4 to stack 3, etc.\nNOTE: You could make an argument that, rather than \u201crolling over,\u201d we should \nbe OK with some stacks not being at full capacity. This would improve the time \ncomplexity (by a fair amount, with a large number of elements), but it might \nget us into tricky situations later on if someone assumes that all stacks (other \nthan the last) operate at full capacity. 
There\u2019s no \u201cright answer\u201d here; discuss this \ntrade-ofi with your interviewer!\n1 public class SetOfStacks {\n2 ArrayList<Stack> stacks = new ArrayList<Stack>();\n3 public int capacity;\n4 public SetOfStacks(int capacity) { this.capacity = capacity; }\n5 \n6 public Stack getLastStack() {\n7 if (stacks.size() == 0) return null;\n8 return stacks.get(stacks.size() - 1);\n9 }\n10 \n11 public void push(int v) { /* see earlier code */ }\n12 public int pop() {\n13 Stack last = getLastStack();\n14 System.out.println(stacks.size());\n15 int v = last.pop();\n16 if (last.size == 0) stacks.remove(stacks.size() - 1);\n17 return v;\n18 }\n19 \n20 public int popAt(int index) {\n21 return leftShift(index, true);\n22 }\n23 \n24 public int leftShift(int index, boolean removeTop) {\n25 Stack stack = stacks.get(index);\n26 int removed_item;\n27 if (removeTop) removed_item = stack.pop();\n28 else removed_item = stack.removeBottom();\n29 if (stack.isEmpty()) {\n30 stacks.remove(index);\n31 } else if (stacks.size() > index + 1) {\n32 int v = leftShift(index + 1, false);\n\n33 stack.push(v);\n34 }\n35 return removed_item;\n36 }\n37 }\n38 \n39 public class Stack {\n40 private int capacity;\n41 public Node top, bottom;\n42 public int size = 0;\n43 \n44 public Stack(int capacity) { this.capacity = capacity; }\n45 public boolean isAtCapacity() { return capacity == size; }\n46 \n47 public void join(Node above, Node below) {\n48 if (below != null) below.above = above;\n49 if (above != null) above.below = below;\n50 }\n51 \n52 public boolean push(int v) {\n53 if (size >= capacity) return false;\n54 size++;\n55 Node n = new Node(v);\n56 if (size == 1) bottom = n;\n57 join(n, top);\n58 top = n;\n59 return true;\n60 }\n61 \n62 public int pop() {\n63 Node t = top;\n64 top = top.below;\n65 size--;\n66 return t.value;\n67 }\n68 \n69 public boolean isEmpty() { return size == 0; }\n70 public int removeBottom() {\n71 Node b = bottom;\n72 bottom = bottom.above;\n73 if (bottom != null) 
bottom.below = null;\n74 size--;\n75 return b.value;\n76 }\n77 }", "question": "Imagine a (literal) \nstack of plates. If the stack gets too high, it might topple. There-fore, in real life, we would likely start a new stack when the previous stack exceeds \nsome threshold. Implement a data structure \nSetOfStacks that mimics this. SetOf-Stacks should be composed of several stacks, and should create a new stack once \nthe previous one exceeds capacity. SetOfStacks.push() and SetOfStacks.pop() should \nbehave identically to a single stack (that is, pop() should return the same values as it \nwould if there were just a single stack).\nFOLLOW UP\nImplement a function popAt(int index) which performs a pop operation on a specific \nsub-stack.", "id": 30}, {"answer": "We need to move N disks from Rod 1 to Rod 3, but let\u2019s start from the beginning. Moving the \ntop disk is easy - we just move it to Disk 3. \nCan we move the top two disks? Yes:\n1. Move Disk 1 from Rod 1 to Rod 2\n2. Move Disk 2 from Rod 1 to Rod 3\n3. Move Disk 1 from Rod 2 to Rod 3\nCan we move the top three disks? \n1. We know we can move the top two disks around from one Rod to another (as shown \nearlier), so let\u2019s assume we have moved Disk 1 and 2 to Rod 2.\n2. Move Disk 3 to Rod 3\n3. 
Again we know we can move the top two disks around, so let\u2019s move them from Rod 2 \nto Rod 3.\nThis approach leads to a natural \nrecursive algorithm:\n1 public static void main(String[] args) \n2 int n = 5;\n3 Tower[] towers = new Tower[n];\n4 for (int i = 0; i < 3; i++) towers[i] = new Tower(i);\n5 for (int i = n - 1; i >= 0; i--) towers[0].add(i);\n6 towers[0].moveDisks(n, towers[2], towers[1]);\n7 }\n8 \n9 public class Tower {\n10 private Stack<Integer> disks;\n11 private int index;\n12 public Tower(int i) {\n\n13 disks = new Stack<Integer>();\n14 index = i;\n15 }\n16 \n17 public int index() {\n18 return index;\n19 }\n20 \n21 public void add(int d) {\n22 if (!disks.isEmpty() && disks.peek() <= d) {\n23 System.out.println(\u201cError placing disk \u201d + d);\n24 } else {\n25 disks.push(d);\n26 }\n27 }\n28 \n29 public void moveTopTo(Tower t) {\n30 int top = disks.pop();\n31 t.add(top);\n32 System.out.println(\u201cMove disk \u201d + top + \u201c from \u201d + index() +\n33 \u201c to \u201d + t.index());\n34 }\n35 \n36 public void print() {\n37 System.out.println(\u201cContents of Tower \u201c + index());\n38 for (int i = disks.size() - 1; i >= 0; i--) {\n39 System.out.println(\u201c \u201c + disks.get(i));\n40 }\n41 }\n42 \n43 public void moveDisks(int n, Tower destination, Tower buffer) {\n44 if (n > 0) {\n45 moveDisks(n - 1, buffer, destination);\n46 moveTopTo(destination);\n47 buffer.moveDisks(n - 1, destination, this);\n48 }\n49 }\n50 }", "question": "In the classic problem of the Towers of Hanoi, you have 3 rods and N disks of different \nsizes which can slide onto any tower. The puzzle starts with disks sorted in ascending \norder of size from top to bottom (e.g., each disk sits on top of an even larger one). 
You \nhave the following constraints: \n(A) Only one disk can be moved at a time.\n(B) A disk is slid off the top of one rod onto the next rod.\n(C) A disk can only be placed on top of a larger disk.\nWrite a program to move the disks from the first rod to the last using \nStacks.", "id": 32}, {"answer": "Since the major difference between a queue and a stack is the order (first-in-first-out vs. last-in-first-out), we know that we need to modify peek() and pop() to go in reverse order. We \ncan use our second stack to reverse the order of the elements (by popping s1 and pushing \nthe elements on to s2). In such an implementation, on each peek() and pop() operation, we \nwould pop everything from s1 onto s2, perform the peek / pop operation, and then push \neverything back.\nThis will work, but if two pop / peeks are performed back-to-back, we\u2019re needlessly moving \nelements. We can implement a \u201clazy\u201d approach where we let the elements sit in s2. \ns1 will thus be ordered with the newest elements on the top, while s2 will have the oldest \nelements on the top. We push the new elements onto s1, and peek and pop from s2. When \ns2 is empty, we\u2019ll transfer all the elements from s1 onto s2, in reverse order.\n1 public class MyQueue<T> {\n2 Stack<T> s1, s2;\n3 public MyQueue() {\n4 s1 = new Stack<T>();\n5 s2 = new Stack<T>();\n6 }\n7 \n8 public int size() { \n9 return s1.size() + s2.size(); \n10 }\n11 \n12 public void add(T value) { \n13 s1.push(value); \n14 }\n15 \n16 public T peek() {\n17 if (!s2.empty()) return s2.peek();\n18 while (!s1.empty()) s2.push(s1.pop());\n19 return s2.peek(); \n20 }\n21 \n22 public T remove() {\n23 if (!s2.empty()) return s2.pop();\n24 while (!s1.empty()) s2.push(s1.pop());\n25 return s2.pop();\n26 }\n27 }", "question": "Implement a \nMyQueue class which implements a \nqueue using two \nstacks.", "id": 34}, {"answer": "Sorting can be performed with one more stack. 
The idea is to pull an item from the original \nstack and push it on the other stack. If pushing this item would violate the sort order of the \nnew stack, we need to remove enough items from it so that it\u2019s possible to push the new \nitem. Since the items we removed are on the original stack, we\u2019re back where we started. \nThe \nalgorithm is O(N^2) and appears below.\n1 public static Stack<Integer> sort(Stack<Integer> s) {\n2 Stack<Integer> r = new Stack<Integer>();\n3 while(!s.isEmpty()) {\n4 int tmp = s.pop();\n5 while(!r.isEmpty() && r.peek() > tmp) {\n6 s.push(r.pop());\n7 }\n8 r.push(tmp);\n9 }\n10 return r;\n11 }", "question": "Write a program to \nsort a \nstack in ascending order. You should not make any assump-tions about how the stack is implemented. The following are the only functions that \nshould be used to write this program: push", "id": 36}, {"answer": "The idea is very simple: the difference of min depth and max depth should not exceed 1, \nsince the difference of the min and the max depth is the maximum distance difference pos-sible in the tree.\n1 public static int maxDepth(TreeNode root) {\n2 if (root == null) {\n3 return 0;\n4 }\n5 return 1 + Math.max(maxDepth(root.left), maxDepth(root.right));\n6 }\n7 \n8 public static int minDepth(TreeNode root) {\n9 if (root == null) {\n10 return 0;\n11 }\n12 return 1 + Math.min(minDepth(root.left), minDepth(root.right));\n13 }\n14 \n15 public static boolean isBalanced(TreeNode root){\n16 return (maxDepth(root) - minDepth(root) <= 1);\n17 }", "question": "Implement a function to check if a \ntree is balanced. For the purposes of this question, \na balanced tree is defined to be a tree such that no two leaf nodes differ in distance \nfrom the root by more than one.", "id": 38}, {"answer": "This problem can be solved by just simple graph traversal, such as \ndepth first search or \nbreadth first search. We start with one of the two nodes and, during traversal, check if the \nother node is found. 
We should mark any node found in the course of the algorithm as \u201aal-ready visited\u2019 to avoid cycles and repetition of the nodes.\n1 public enum State {\n2 Unvisited, Visited, Visiting;\n3 } \n4 \n5 public static boolean search(Graph g, Node start, Node end) { \n6 LinkedList<Node> q = new LinkedList<Node>(); // operates as Stack\n7 for (Node u : g.getNodes()) {\n8 u.state = State.Unvisited;\n9 }\n10 start.state = State.Visiting;\n11 q.add(start);\n12 Node u;\n13 while(!q.isEmpty()) {\n14 u = q.removeFirst(); // i.e., pop()\n15 if (u != null) {\n16 for (Node v : u.getAdjacent()) {\n17 if (v.state == State.Unvisited) {\n18 if (v == end) {\n19 return true;\n20 } else {\n21 v.state = State.Visiting;\n22 q.add(v);\n23 }\n24 }\n25 }\n26 u.state = State.Visited;\n27 }\n28 }\n29 return false;\n30 }", "question": "Given a directed graph, design an algorithm to find out whether there is a route be-tween two nodes.", "id": 40}, {"answer": "We will try to create a \nbinary tree such that for each node, the number of nodes in the left \nsubtree and the right subtree are equal, if possible. \nAlgorithm:\n1. Insert into the tree the middle element of the array.\n2. Insert (into the left subtree) the left subarray elements\n3. Insert (into the right subtree) the right subarray elements\n4. 
Recurse\n \n1 public static TreeNode addToTree(int arr[], int start, int end){\n2 if (end < start) {\n3 return null;\n4 }\n5 int mid = (start + end) / 2;\n6 TreeNode n = new TreeNode(arr[mid]);\n7 n.left = addToTree(arr, start, mid - 1);\n8 n.right = addToTree(arr, mid + 1, end);\n9 return n;\n10 }\n11 \n12 public static TreeNode createMinimalBST(int array[]) {\n13 return addToTree(array, 0, array.length - 1);\n14 }", "question": "Given a sorted (increasing order) array, write an algorithm to create a binary tree with \nminimal height.", "id": 42}, {"answer": "We can do a simple level by level traversal of the tree, with a slight modification of the breath-first traversal of the tree.\nIn a usual \nbreath first search traversal, we simply traverse the nodes without caring which \nlevel we are on. In this case, it is critical to know the level. We thus use a dummy node to \nindicate when we have finished one level and are starting on the next.\n1 \n2 int level = 0;\n3 ArrayList<LinkedList<TreeNode>> result = \n4 new ArrayList<LinkedList<TreeNode>>();\n5 LinkedList<TreeNode> list = new LinkedList<TreeNode>();\n6 list.add(root);\n7 result.add(level, list);\n8 while (true) {\n9 list = new LinkedList<TreeNode>();\n10 for (int i = 0; i < result.get(level).size(); i++) {\n11 TreeNode n = (TreeNode) result.get(level).get(i);\n12 if (n != null) {\n13 if(n.left != null) list.add(n.left);\n14 if(n.right!= null) list.add(n.right);\n15 }\n16 }\n17 if (list.size() > 0) {\n18 result.add(level + 1, list);\n19 } else { \n20 break;\n21 }\n22 level++;\n23 }\n24 return result;\n25 }", "question": "Given a binary search \ntree, design an algorithm which creates a \nlinked list of all the \nnodes at each depth (eg, if you have a tree with depth D, you\u2019ll have D linked lists).", "id": 44}, {"answer": "We approach this problem by thinking very, very carefully about what happens on an in-order traversal. 
On an in-order traversal, we visit X.left, then X, then X.right.\nSo, if we want to find X.successor(), we do the following:\n1. If X has a right child, then the successor must be on the right side of X (because of the \norder in which we visit nodes). Specifically, the left-most child must be the first node visited \nin that subtree.\n2. Else, we go to X\u2019s parent (call it P). \n2.a. If X was a left child (P.left = X), then P is the successor of X\n2.b. If X was a right child (P.right = X), then we have fully visited P, so we call successor(P). \n1 public static TreeNode inorderSucc(TreeNode e) { \n2 if (e != null) { \n3 TreeNode p; \n4 // Found right children -> return 1st inorder node on right\n5 if (e.parent == null \n6 p = leftMostChild(e.right); \n7 } else { \n8 // Go up until we\u2019re on left instead of right (case 2b)\n9 while ((p = e.parent) != null) { \n10 if (p.left == e) {\n11 break; \n12 }\n13 e = p; \n14 } \n15 } \n16 return p; \n17 } \n18 return null; \n19 } \n20 \n21 public static TreeNode leftMostChild(TreeNode e) {\n22 if (e == null) return null;\n23 while (e.left != null) e = e.left; \n24 return e; \n25 }", "question": "Write an algorithm to find the \u201anext\u2019 node (e.g., in-order successor) of a given node in \na binary search \ntree where each node has a link to its parent.", "id": 46}, {"answer": "If this were a binary search tree, we could do a modified find on the two nodes and see where \nthe paths diverge. Unfortunately, this is not a binary search tree, so we much try other ap-proaches. \nAttempt #1:\nIf each node has a link to its parent, we could trace p and q\u2019s paths up until they intersect.\nAttempt #2:\nAlternatively, you could follow a chain in which p and q are on the same side. That is, if p and \nq are both on the left of the node, branch left to look for the common ancestor. 
When p and \nq are no longer on the same side, you must have found the first common ancestor.\n1 public Tree commonAncestor(Tree root, Tree p, Tree q) { \n2 if (covers(root.left, p) && covers(root.left, q)) \n3 return commonAncestor(root.left, p, q);\n4 if (covers(root.right, p) && covers(root.right, q)) \n5 return commonAncestor(root.right, p, q);\n6 return root; \n7 } \n8 private boolean covers(Tree root, Tree p) { /* is p a child of root? */ \n9 if (root == null) return false;\n10 if (root == p) return true;\n11 return covers(root.left, p) \n12 }\nWhat is the running time of this algorithm? One way of looking at this is to see how many \ntimes each node is touched. \nCovers\n touches every child node, so we know that every single \nnode in the tree must be touched at least once, and many nodes are touched multiple times. \nAttempt #3:\nFor any node r, we know the following:\n1. If p is on one side and q is on the other, r is the first common ancestor.\n2. Else, the first common ancestor is on the left or the right side.\nSo, we can create a simple recursive algorithm called search that calls search(left side) and \nsearch(right side) looking at how many nodes (p or q) are placed from the left side and from \nthe right side of the current node. If there are two nodes on one of the sides, then we have \n\nto check if the child node on this side is p or q (because in this case the current node is the \ncommon ancestor). If the child node is neither p nor q, we should continue to search further \n(starting from the child).\nIf one of the searched nodes (p or q) is located on the right side of the current node, then \nthe other node is located on the other side. 
Thus the current node is the common ancestor.\n1 static int TWO_NODES_FOUND = 2;\n2 static int ONE_NODE_FOUND = 1;\n3 static int NO_NODES_FOUND = 0;\n4 \n5 // Checks how many \u201cspecial\u201d nodes are located under this root\n6 int covers(TreeNode root, TreeNode p, TreeNode q) {\n7 int ret = NO_NODES_FOUND;\n8 if (root == null) return ret;\n9 if (root == p \n10 ret += covers(root.left, p, q);\n11 if(ret == TWO_NODES_FOUND) // Found p and q \n12 return ret;\n13 return ret + covers(root.right, p, q);\n14 }\n15 \n16 TreeNode commonAncestor(TreeNode root, TreeNode p, TreeNode q) {\n17 if (q == p && (root.left == q \n18 int nodesFromLeft = covers(root.left, p, q); // Check left side\n19 if (nodesFromLeft == TWO_NODES_FOUND) {\n20 if(root.left == p \n21 else return commonAncestor(root.left, p, q);\n22 } else if (nodesFromLeft == ONE_NODE_FOUND) {\n23 if (root == p) return p;\n24 else if (root == q) return q;\n25 }\n26 int nodesFromRight = covers(root.right, p, q); // Check right side\n27 if(nodesFromRight == TWO_NODES_FOUND) {\n28 if(root.right == p \n29 else return commonAncestor(root.right, p, q);\n30 } else if (nodesFromRight == ONE_NODE_FOUND) {\n31 if (root == p) return p;\n32 else if (root == q) return q;\n33 }\n34 if (nodesFromLeft == ONE_NODE_FOUND && \n35 nodesFromRight == ONE_NODE_FOUND) return root;\n36 else return null;\n37 }", "question": "Design an algorithm and write code to find the first common ancestor of two nodes \nin a binary \ntree. Avoid storing additional nodes in a data structure. NOTE: This is not \nnecessarily a binary search tree.", "id": 48}, {"answer": "Note that the problem here specifies that T1 has millions of nodes\u2014this means that we \nshould be careful of how much space we use. Let\u2019s say, for example, T1 has 10 million \nnodes\u2014this means that the data alone is about 40 mb. We could create a string representing \nthe inorder and preorder traversals. 
If T2\u2019s preorder traversal is a substring of T1\u2019s preorder \ntraversal, and T2\u2019s inorder traversal is a substring of T1\u2019s inorder traversal, then T2 is a sub-string of T1. We can check this using a su\u02ddx tree. However, we may hit memory limitations \nbecause su\u02ddx trees are extremely memory intensive. If this become an issue, we can use an \nalternative approach.\nAlternative Approach: \nThe treeMatch procedure visits each node in the small tree at most \nonce and is called no more than once per node of the large tree. Worst case runtime is at \nmost \nO(n * m), where n and m are the sizes of trees T1 and T2, respectively. If k is the number \nof occurrences of T2\u2019s root in T1, the worst case runtime can be characterized as O(n + k * m).\n1 boolean containsTree(TreeNode t1, TreeNode t2) {\n2 if (t2 == null) return true; // The empty tree is always a subtree\n3 else return subTree(t1, t2);\n4 }\n5 \n6 boolean subTree(TreeNode r1, TreeNode r2) {\n7 if (r1 == null) \n8 return false; // big tree empty & subtree still not found.\n9 if (r1.data == r2.data) {\n10 if (matchTree(r1,r2)) return true;\n11 }\n12 return (subTree(r1.left, r2) \n13 }\n14 \n15 boolean matchTree(TreeNode r1, TreeNode r2) {\n16 if (r2 == null && r1 == null) \n17 return true; // nothing left in the subtree\n18 if (r1 == null \n19 return false; // big tree empty & subtree still not found\n20 if (r1.data != r2.data) \n21 return false; // data doesn\u2019t match\n22 return (matchTree(r1.left, r2.left) && \n23 matchTree(r1.right, r2.right));\n24 }\n25 }", "question": "You have two very large binary \ntrees: T1, with millions of nodes, and T2, with hun-dreds of nodes. Create an algorithm to decide if T2 is a subtree of T1.", "id": 50}, {"answer": "Let\u2019s approach this problem by simplifying it. What if the path had to start at the root? 
In that \ncase, we would have a much easier problem:\nStart from the root and branch left and right, computing the sum thus far on each path. \nWhen we find the sum, we print the current path. Note that we don\u2019t stop just because \nwe found the sum. Why? Because we could have the following path (assume we are \nlooking for the sum 5): 2 + 3 + \u20134 + 3 + 1 + 2. If we stopped once we hit 2 + 3, we\u2019d miss \nseveral paths (2 + 3 + -4 + 3 + 1 and 3 + -4 + 3 + 1 + 2). So, we keep going along every \npossible path.\nNow, what if the path can start anywhere? In that case, we make a small modification. On \nevery node, we look \u201cup\u201d to see if we\u2019ve found the sum. That is\u2014rather than asking \u201cdoes \nthis node start a path with the sum?,\u201d we ask \u201cdoes this node complete a path with the sum?\u201d\n1 void \n2 int level) {\n3 if (head == null) return;\n4 int tmp = sum;\n5 buffer.add(head.data);\n6 for (int i = level;i >- 1; i--){\n7 tmp -= buffer.get(i);\n8 if (tmp == 0) print(buffer, i, level);\n9 }\n10 ArrayList<Integer> c1 = (ArrayList<Integer>) buffer.clone();\n11 ArrayList<Integer> c2 = (ArrayList<Integer>) buffer.clone();\n12 \n\n13 \n\n14 }\n15 \n16 void print(ArrayList<Integer> buffer, int level, int i2) {\n17 for (int i = level; i <= i2; i++) {\n18 System.out.print(buffer.get(i) + \u201c \u201d);\n19 }\n20 System.out.println();\n21 }\nWhat is the time complexity of this algorithm? Well, if a node is at level r, we do r amount \nof work (that\u2019s in the looking \u201cup\u201d step). We can take a guess at \nO(n lg n) (n nodes, doing an \n\naverage of lg n amount of work on each step), or we can be super mathematical:There are 2^r nodes at level r.1*2^1 + 2*2^2 + 3*2^3 + 4*2^4 + ... 
d * 2^d = sum(r * 2^r, r from 0 to depth) = 2 (d-1) * 2^d + 2n = 2^d ==> d = lg nNOTE: 2^lg(x) = xO(2 (lg n - 1) * 2^(lg n) + 2) = O(2 (lg n - 1) * n ) = O(n lg n)\nFollowing similar logic, our space complexity is O(n lg n).", "question": "You are given a binary \ntree in which each node contains a value. Design an algorithm \nto print all paths which \nsum up to that value. Note that it can be any path in the tree - it does not have to start at the root.", "id": 52}, {"answer": "This code operates by clearing all bits in N between position i and j, and then ORing to put \nM in there.\n1 public static int updateBits(int n, int m, int i, int j) {\n2 int max = ~0; /* All 1\u2019s */\n3 \n4 // 1\u2019s through position j, then 0\u2019s\n5 int left = max - ((1 << j) - 1);\n6 \n7 // 1\u2019s after position i\n8 int right = ((1 << i) - 1); \n9 \n10 // 1\u2019s, with 0s between i and j\n11 int mask = left \n12 \n13 // Clear i through j, then put m in there \n14 return (n & mask) \n15 }", "question": "You are given two 32-bit numbers, N and M, and two bit positions, i and j. Write a \nmethod to set all \nbits between i and j in N equal to M (e.g., M becomes a substring of \nN located at i and starting at j). \nEXAMPLE:\nInput: N = 10000000000, M = 10101, i = 2, j = 6\nOutput: N = 10001010100", "id": 54}, {"answer": "First, let\u2019s start off by asking ourselves what a non-integer number in binary looks like. By \nanalogy to a decimal number, the number n = 0.101 = 1 * (1/2^1) + 0 * (1/2^2) + 1 * (1/2^3). \nPrinting the int part of n is straight-forward (see below). To print the decimal part, we can \nmultiply by 2 and check if the 2*n is greater than or equal to one. This is essentially \u201cshifting\u201d \nthe fractional sum. That is:r = 2*n = 2*0.101 = 1*(1 / 2^0) + 0*(1 / 2^1) + 1*(1 / 2^2) = 1.01\nIf r >= 1, then we know that n had a 1 right after the decimal point. 
By doing this continu-ously, we can check every digit.\n1 public static String printBinary(String n) {\n2 int intPart = Integer.parseInt(n.substring(0, n.indexOf(\u201a.\u2019)));\n3 double decPart = Double.parseDouble(\n4 n.substring(n.indexOf(\u201a.\u2019), n.length()));\n5 String int_string = \u201c\u201d;\n6 while (intPart > 0) {\n7 int r = intPart % 2;\n8 intPart >>= 1;\n9 int_string = r + int_string;\n10 }\n11 StringBuffer dec_string = new StringBuffer();\n12 while (decPart > 0) {\n13 if (dec_string.length() > 32) return \u201cERROR\u201d;\n14 if (decPart == 1) {\n15 dec_string.append((int)decPart);\n16 break;\n17 }\n18 double r = decPart * 2;\n19 if (r >= 1) {\n20 dec_string.append(1);\n21 decPart = r - 1;\n22 } else {\n23 dec_string.append(0);\n24 decPart = r;\n25 }\n26 }\n27 return int_string + \u201c.\u201d + dec_string.toString();\n28 }", "question": "Given a (decimal - e.g. 3.72) number that is passed in as a string, print the \nbinary rep-resentation. If the number can not be represented accurately in binary, print \u201cERROR\u201d", "id": 56}, {"answer": "The Brute Force Approach:\nAn easy approach is simply brute force: count the number of 1\u2019s in n, and then increment \n(or decrement) until you find a number with the same number of 1\u2019s. Easy - but not terribly \ninteresting. Can we do something a bit more optimal? Yes!\nNumber Properties Approach for Next Number\nObservations:\n \n\u00bb If we \u201cturn on\u201d a 0, we need to \u201cturn off\u201d a 1\n \n\u00bb If we turn on a 0 at bit i and turn off a 1 at bit j, the number changes by 2^i - 2^j.\n \n\u00bb If we want to get a bigger number with the same number of 1s and 0s, i must be bigger \nthan j.\nSolution:\n1. Traverse from right to left. Once we\u2019ve passed a 1, turn on the next 0. We\u2019ve now in-creased the number by 2^i. Yikes! Example: xxxxx011100 becomes xxxxx111100\n2. Turn off the one that\u2019s just to the right side of that. 
We\u2019re now bigger by 2^i - 2^(i-1) \nExample: xxxxx111100 becomes xxxxx101100\n3. Make the number as small as possible by rearranging all the 1s to be as far right as pos-sible: Example: xxxxx101100 becomes xxxxx100011\n \nTo get the previous number, we do the reverse.\n1. Traverse from right to left. Once we\u2019ve passed a zero, turn off the next 1. Example: \nxxxxx100011 becomes xxxxx000011.\n2. Turn on the 0 that is directly to the right. Example: xxxxx000011 becomes \nxxxxx010011.\n3. Make the number as big as possible by shifting all the ones as far to the left as pos-sible. Example: xxxxx010011 becomes xxxxx011100 .\n \nAnd now, for the code. Note the emphasis on pulling common code out into a reusable func-tion. Your interviewer will look for \u201cclean code\u201d like this.\n\n1 public static boolean GetBit(int n, int index) {\n2 return ((n & (1 << index)) > 0);\n3 }\n4 \n5 public static int SetBit(int n, int index, boolean b) {\n6 if (b) {\n7 return n \n8 } else {\n9 int mask = ~(1 << index);\n10 return n & mask;\n11 }\n12 }\n13 \n14 public static int GetNext_NP(int n) {\n15 if (n <= 0) return -1;\n16 \n17 int index = 0;\n18 int countOnes = 0;\n19 \n20 \n\n21 while (!GetBit(n, index)) index++;\n22 \n23 // Turn on next zero.\n24 while (GetBit(n, index)) {\n25 index++;\n26 countOnes++;\n27 }\n28 n = SetBit(n, index, true);\n29 \n30 // Turn off previous one\n31 index--;\n32 n = SetBit(n, index, false);\n33 countOnes--;\n34 \n35 // Set zeros\n36 for (int i = index - 1; i >= countOnes; i--) {\n37 n = SetBit(n, i, false);\n38 }\n39 \n40 // Set ones\n41 for (int i = countOnes - 1; i >= 0; i--) {\n42 n = SetBit(n, i, true);\n43 }\n44 \n45 return n;\n\n46 }\n47 \n48 public static int GetPrevious_NP(int n) {\n49 if (n <= 0) return -1; // Error\n50 \n51 int index = 0;\n52 int countZeros = 0;\n53 \n54 \n55 while (GetBit(n, index)) index++;\n56 \n57 // Turn off next 1.\n58 while (!GetBit(n, index)) {\n59 index++;\n60 countZeros++;\n61 }\n62 n = SetBit(n, index, 
false);\n63 \n64 // Turn on previous zero\n65 index--;\n66 n = SetBit(n, index, true);\n67 countZeros--;\n68 \n69 // Set ones\n70 for (int i = index - 1; i >= countZeros; i--) {\n71 n = SetBit(n, i, true);\n72 }\n73 \n74 // Set zeros\n75 for (int i = countZeros - 1; i >= 0; i--) {\n76 n = SetBit(n, i, false);\n77 }\n78 \n79 return n;\n80 }", "question": "Given an integer, print the next smallest and next largest number that have the same \nnumber of 1 bits in their \nbinary representation.", "id": 58}, {"answer": "We can work backwards to solve this question. \nWhat does it mean if A & B == 0?\nIt means that A and B never have a 1 bit in the same place. So if n & (n-1) == 0, then n and \nn-1 never share a 1.\nWhat does n-1 look like (as compared with n)?\nTry doing subtraction by hand (in base 2 or 10). What happens? 1101011000 [base 2]- 1 = 1101010111 [base 2] 593100 [base 10]- 1 = 593099 [base 10]\nWhen you subtract 1 from a number, you look at the least significant bit. If it\u2019s a 1 you change \nit to zero and you are done. If it\u2019s a zero, you must \u201cborrow\u201d from a larger bit. So, you go to \nincreasingly larger bits, changing each bit from a 0 to a 1, until you find a 1. You flip that one \nto a 0 and you are done. \nThus, n-1 will look like n, except that n\u2019s initial 0s will be 1\u2019s in n-1, and n\u2019s least significant 1 \nwill be a 0 in (n-1). That is:if n = abcde1000then n-1 = abcde0111\nSo what does n & (n-1) == 0 indicate?\nn and (n-1) must have no 1s in common. Given that they look like this:if n = abcde1000then n-1 = abcde0111\nabcde must be all 0s, which means that n must look like this: 00001000. n is therefore a \npower of two.\nSo, we have our answer: ((n & (n-1)) == 0) checks if n is a power of 2 (or 0).", "question": "Explain what the following code \ndoes: \n((n & (n-1)) == 0).", "id": 60}, {"answer": "This seemingly complex problem is actually rather straightforward. 
To approach this, ask \nyourself how you would figure out which bits in two numbers are different. Simple: with an \nxor.\nEach 1 in the xor will represent one different bit between A and B. We then simply need to \ncount the number of bits that are 1.\n1 public static int bitSwapRequired(int a, int b) {\n2 int count = 0;\n3 for (int c = a ^ b; c != 0; c = c >> 1) { \n4 count += c & 1;\n5 }\n6 return count;\n7 }", "question": "Write a function to determine the number of bits required to convert integer A to \ninteger B.\nInput: 31, 14\nOutput: 2", "id": 62}, {"answer": "Mask all odd bits with 10101010 in binary (which is 0xAA), then shift them left to put them in \nthe even bits. Then, perform a similar operation for even bits. This takes a total 5 instructions.\n1 public static int swapOddEvenBits(int x) { \n2 return ( ((x & 0xaaaaaaaa) >> 1) \n3 }", "question": "Write a program to swap odd and even \nbits in an integer with as few instructions as \npossible (e.g., bit 0 and bit 1 are swapped, bit 2 and bit 3 are swapped, etc).", "id": 64}, {"answer": "Picture a list of binary numbers between 0 to n. What will change when we remove one \nnumber? We\u2019ll get an imbalance of 1s and 0s in the least significant bit. That is, before re-moving the number k, we have this list of least significant bits (in some order):0 0 0 0 0 1 1 1 1 1 OR 0 0 0 0 0 1 1 1 1\nSuppose we secretly removed either a 1 or a 0 from this list. Could we tell which one was \nremoved? remove(0 from 0 0 0 0 0 1 1 1 1 1) --> 0 0 0 0 1 1 1 1 1remove(1 from 0 0 0 0 0 1 1 1 1 1) --> 0 0 0 0 0 1 1 1 1remove(0 from 0 0 0 0 0 1 1 1 1) --> 0 0 0 0 1 1 1 1remove(1 from 0 0 0 0 0 1 1 1 1) --> 0 0 0 0 0 1 1 1\nNote that if 0 is removed, we always wind up with count(1) >= count(0). If 1 is removed, we \nwind up with count(1) < count(0). Therefore, we can look at the least significant bit to figure \nout in O(N) time whether the missing number has a 0 or a 1 in the least significant bit (LSB). 
\nIf LSB(missing) == 0, then we can discard all numbers with LSB = 1. If LSB(missing) == 1, we \ncan discard all numbers with LSB = 0.\nWhat about the next iteration, with the second least significant bit (SLSB)? We\u2019ve discarded \nall the numbers with LSB = 1, so our list looks something like this (if n = 5, and missing = 3):000000000100010-----00100001010011000111010000100101010010110110001101\nOur SLSBs now look like 0 0 1 0 1 0. Using the same logic as we applied for LSB, we can figure \nout that the missing number must have SLSB = 1. Our number must look like xxxx11.\nThird iteration, discarding numbers with SLSB = 0:000000000100010-----00100001010011000111010000100101010010110110001101\nWe can now compute that count(TLSB = 1) = 1 and count(TLSB = 1) = 1. Therefore, TLSB = 0. \nWe can recurse repeatedly, building our number bit by \nbit:\n\n1 ArrayList<BitInteger> array) {\n2 \n\n3 } \n4 \n5 int \n6 if (column < 0) { // Base case and error condition\n7 return 0;\n8 }\n9 ArrayList<BitInteger> oddIndices = new ArrayList<BitInteger>();\n10 ArrayList<BitInteger> evenIndices = new ArrayList<BitInteger>();\n11 for (BitInteger t : input) {\n12 if (t.fetch(column) == 0) {\n13 evenIndices.add(t);\n14 } else {\n15 oddIndices.add(t);\n16 }\n17 }\n18 if (oddIndices.size() >= evenIndices.size()) {\n19 \n20 } else {\n21 \n22 }\n23 }\n24 \nWhat is the run-time of this algorithm? On the first pass, we look at O(N) bits. On the second \npass, we\u2019ve eliminated N/2 numbers, so we then look at O(N/2) bits. On the third pass, we \nhave eliminated another half of the numbers, so we then look at O(N/4) bits. If we keep go-ing, we get an equation that looks like:O(N) + O(N/2) + O(N/4) + O(N/8) + ... = O(2N) = O(N)\nOur run-time is O(N).", "question": "An array A[1...n] contains all the integers from 0 to n except for one number which is \nmissing. In this problem, we cannot access an entire integer in A with a single opera-tion. 
The elements of A are represented in \nbinary, and the only operation we can use \nto access them is \u201cfetch the jth bit of A[i]\u201d, which takes constant time. Write code to \nfind the missing integer. Can you do it in \nO(n) time?", "id": 66}, {"answer": "An interviewer is asking this problem to see how you think and approach problems\u2014so \ndon\u2019t just guess randomly.\nTry approaching this the following way: What sorts of operations would get us to 8? I can \nthink of a few:4 * 2 = 816 / 2 = 84 + 4 = 8\nLet\u2019s start with the first one. Is there any way to make 3 1 3 6 produce 4 * 2? We can easily \nnotice that 3 + 1 = 4 (the first two numbers). We can also notice that 6 / 3 = 2. If we had \u201c3 1 \n6 3\u201d, we\u2019d be done, since (3 + 1)*(6 / 3) = 8. Although it seems a little unconventional to do \nthis, we can, in fact, just flip the 6 and the 3 to get the solution:(( 3 + 1 ) / 3) * 6 = 8", "question": "Add \narithmetic operators (plus, minus, times, divide) to make the following expres-sion true: 3 1 3 6 = 8. You can use any parentheses you\u2019d like.", "id": 68}, {"answer": "Impossible. Here\u2019s why: The chess board initially has 32 black and 32 white squares. By re-moving opposite corners (which must be the same color), we\u2019re left with 30 of one color and \n32 of the other color. Let\u2019s say, for the sake of argument, that we have 30 black and 32 white \nsquares.\nWhen we lay down each domino, we\u2019re taking up one white and one black square. Therefore, \n31 dominos will take up 31 white squares and 31 black squares exactly. On this board, how-ever, we must have 30 black squares and 32 white squares. Hence, it is impossible.", "question": "There is an 8x8 chess board in which two diagonally opposite corners have been cut \noff. You are given 31 dominos, and a single domino can cover exactly two squares. \nCan you use the 31 dominos to cover the entire board? 
Prove your answer (by provid-ing an example, or showing why it\u2019s impossible).", "id": 70}, {"answer": "We can pour water back and forth between the two jugs as follows:", "question": "You have a five quart jug and a three quart jug, and an unlimited supply of water \n(but no measuring cups). How would you come up with exactly four quarts of water?\nNOTE: The jugs are oddly shaped, such that filling up exactly \u201ahalf\u2019 of the jug would \nbe impossible.", "id": 72}, {"answer": "A bunch of men are on an island. A genie comes down and gathers everyone to-gether and places a magical hat on some people\u2019s heads (i.e., at least one person has \na hat). The hat is magical: it can be seen by other people, but not by the wearer of \nthe hat himself. To remove the hat, those (and only those who have a hat) must dunk \nthemselves underwater at exactly midnight. If there are n people and c hats, how \nlong does it take the men to remove the hats? The men cannot tell each other (in any \nway) that they have a hat.\nFOLLOW UP\nProve that your solution is correct.", "question": "5\n0\nFilled 5 quart jug\n2\n3\nFilled 3Q with 5Q\u2019s contents\n0\n2\nDumped 3Q\n5\n2\nFilled 5Q\n4\n3\nFill remainder of 3Q with 5Q\n4\nDone! We have four quarts.\n \n\u00bb Many brain teasers have a math / CS root to them\u2014this is one of them! Note that as \nlong as the two jug sizes are relatively prime (i.e., have no common prime factors), you \ncan find a pour sequence for any value between 1 and the sum of the jug sizes.", "id": 74}, {"answer": "There is a building of 100 floors. If an egg drops from the Nth floor or above it will \nbreak. If it\u2019s dropped from any floor below, it will not break. You\u2019re given 2 eggs. 
Find \nN, while \nminimizing the number of drops for the worst case.", "question": "This problem seems hard, so let\u2019s simplify it by looking at specific cases.\nCase c = 1: \nExactly one man is wearing a hat.\nAssuming all the men are intelligent, the man with the hat should look around and realize \nthat no one else is wearing a hat. Since the genie said that at least one person is wearing \na hat, he must conclude that he is wearing a hat. Therefore, he would be able to remove it \nthat night.\nCase c = 2:\n Exactly two men are wearing hats.\nThe two men with hats see one hat, and are unsure whether c = 1 or c = 2. They know, from \nthe previous case, that if c = 1, the hats would be removed on Night #1. Therefore, if the other \nman still has a hat, he must deduce that c = 2, which means that he has a hat. Both men \nwould then remove the hats on Night #2\nCase General:\n If c = 3, then each man is unsure whether c = 2 or 3. If it were 2, the hats would \nbe removed on Night #2. If they are not, they must deduce that c = 3, and therefore they \nhave a hat. We can follow this logic for c = 4, 5, \u2013\n\nUsing induction to prove a statement P(n)\nIf (1) \nP(1) = TRUE (e.g., the statement is true when n = 1)\nAND (2) \nif P(n) = TRUE -> P(n+1) = TRUE (e.g., P(n+1) is true whenever P(2) is true).\nTHEN\n P(n) = TRUE for all n >= 1.\n\n \n\u00bb Condition 2 sets up an infinite deduction chain: P(1) implies P(2) implies P(3) implies ... \nP(n) implies P(n+1) implies ...\n\n \n\u00bb Condition one (P(1) is true) ignites this chain, with truth cascading off into infinity.\nBase Case: c = 1 (See previous page).\nAssume true for c hats. i.e., if there are c hats, it will take c nights to remove all of them.\nProve true for c+1 hats.\nEach man with a hat sees c hat, and can not be immediately sure whether there are c hats or \nc+1 hats. However, he knows that if there are c hats, it will take exactly c nights to remove \nthem. 
Therefore, when c nights have passed and everyone still has a hat, he can only con-clude that there are c+1 hats. He must know that he is wearing a hat. Each man makes the \nsame conclusion and simultaneously removes the hats on night c+1.\nTherefore, we have met the principles of induction. We have proven that it will take c nights \nto remove c hats.", "id": 76}, {"answer": "There are one hundred closed lockers in a hallway. A man begins by opening all one \nhundred lockers. Next, he closes every second locker. Then he goes to every third \nlocker and closes it if it is open or opens it if it is closed (e.g., he toggles every third \nlocker). After his one hundredth pass in the hallway, in which he toggles only locker \nnumber one hundred, how many lockers are open?", "question": "Observation\n: Regardless of how we drop Egg1, Egg2 must do a linear \nsearch. i.e., if Egg1 \nbreaks between floor 10 and 15, we have to check every floor in between with the Egg2\nThe Approach:\nA First Try: Suppose we drop an egg from the 10th floor, then the 20th, \u2013\n \n\u00bb If the first egg breaks on the first drop (Floor 10), then we have at most 10 drops total.\n \n\u00bb If the first egg breaks on the last drop (Floor 100), then we have at most 19 drops total \n(floors 10, 20, ...,90, 100, then 91 through 99).\n \n\u00bb That\u2019s pretty good, but all we\u2019ve considered is the absolute worst case. We should do \nsome \u201cload balancing\u201d to make those two cases more even.\nGoal: Create a system for dropping Egg1 so that the most drops required is consistent, \nwhether Egg1 breaks on the first drop or the last drop.\n1. A perfectly load balanced system would be one in which Drops of Egg1 + Drops of \nEgg2 is always the same, regardless of where Egg1 broke.\n2. For that to be the case, since each drop of Egg1 takes one more step, Egg2 is allowed \none fewer step.\n3. 
We must, therefore, reduce the number of steps potentially required by Egg2 by one \ndrop each time. For example, if Egg1 is dropped on Floor 20 and then Floor 30, Egg2 \nis potentially required to take 9 steps. When we drop Egg1 again, we must reduce \npotential Egg2 steps to only 8. That is, we must drop Egg1 at floor 39.\n4. We know, therefore, Egg1 must start at Floor X, then go up by X-1 floors, then X-2, \u2013, \nuntil it gets to 100.\n5. Solve for X+(X-1)+(X-2)+\u2013+1 = 100. X(X+1)/2 = 100 -> X = 14\nWe go to Floor 14, then 27, then 39, \u2013 This takes 14 steps maximum.", "id": 78}, {"answer": "Design the \ndata structures for a generic deck of cards. Explain how you would sub-class it to implement particular card games.", "question": "Question: For which rounds is a door toggled (open or closed)?\nA door n is toggled once for each factor of n, including itself and 1. That is, door 15 is toggled \non round 1, 3, 5, and 15.\nQuestion: \nWhen would a door be left open? \nAnswer: \nA door is left open if the number of factors (x) is odd. You can think about this by \npairing factors off as an open and a close. If there\u2019s one remaining, the door will be open.\nQuestion: \nWhen would x be odd?\nAnswer: \nx is odd if n is a perfect square. Here\u2019s why: pair n\u2019s factors by their complements. \nFor example, if n is 36, the factors are (1, 36), (2, 18), (3, 12), (4, 9), (6, 6). Note that (6, 6) only \ncontributes 1 factor, thus giving n an odd number of factors.\nQuestion: \nHow many perfect squares are there?\nAnswer: \nThere are 10 perfect squares. You could count them (1, 4, 9, 16, 25, 36, 49, 64, 81, \n100), or you could simply realize that you can take the numbers 1 through 10 and square \nthem (1*1, 2*2, 3*3, ..., 10*10).\nTherefore, there are 10 lockers open.", "id": 80}, {"answer": "Imagine you have a call center with three levels of employees: fresher, technical lead \n(TL), product manager (PM). 
There can be multiple employees, but only one TL or PM. \nAn incoming telephone call must be allocated to a fresher who is free. If a fresher \ncan\u2019t handle the call, he or she must escalate the call to technical lead. If the TL is \nnot free or not able to handle it, then the call should be escalated to PM. Design the \nclasses and data structures for this problem. Implement a method getCallHandler().", "question": "1 public class Card {\n2 public enum Suit { \n3 CLUBS (1), SPADES (2), HEARTS (3), DIAMONDS (4);\n4 int value;\n5 private Suit(int v) { value = v; }\n6 };\n7 \n8 private int card;\n9 private Suit suit;\n10 \n11 public Card(int r, Suit s) { \n12 card = r; \n13 suit = s; \n14 }\n15 \n16 public int value() { return card; }\n17 public Suit suit() { return suit; }\n18 }\nAssume that we\u2019re building a blackjack game, so we need to know the value of the cards. \nFace cards are ten and an ace is 11 (most of the time, but that\u2019s the job of the Hand class, not \nthe following class).\n1 public class BlackJackCard extends Card {\n2 public BlackJackCard(int r, Suit s) { super(r, s); }\n3 \n4 public int value() {\n5 int r = super.value();\n6 if (r == 1) return 11; // aces are 11\n7 if (r < 10) return r;\n8 return 10;\n9 }\n10 \n11 boolean isAce() { \n12 return super.value() == 1; \n13 } \n14 }", "id": 82}, {"answer": "Design a \nmusical juke box using object oriented principles.", "question": "All three ranks of employees have different work to be done, so those specific functions are \nprofile specific. We should keep these specific things within their respective class.\nThere are a few things which are common to them, like address, name, job title, age, etc. \nThese things can be kept in one class and can be extended / inherited by others.\nFinally, there should be one CallHandler class which would route the calls to the concerned \nperson.\nNOTE: On any object oriented design question, there are many ways to design \nthe objects. 
Discuss the trade-ofis of difierent solutions with your interviewer. \nYou should usually design for long term code \u02ddexibility and maintenance.\n1 public class CallHandler {\n2 \n\n3 \n4 ArrayList<Employee>[] employeeLevels = new ArrayList[LEVELS];\n5 // queues for each call\u2019s rank\n6 Queue<Call>[] callQueues = new LinkedList[LEVELS]; \n7 \n8 public CallHandler() { ... }\n9 \n10 Employee getCallHandler(Call call) {\n11 for (int level = call.rank; level < LEVELS - 1; level++) {\n12 ArrayList<Employee> employeeLevel = employeeLevels[level];\n13 for (Employee emp : employeeLevel) {\n14 if (emp.free) {\n15 return emp;\n16 }\n17 }\n18 }\n19 return null;\n20 }\n21 \n22 // routes the call to an available employee, or adds to a queue \n\n23 void dispatchCall(Call call) {\n24 // try to route the call to an employee with minimal rank\n25 Employee emp = getCallHandler(call);\n26 if (emp != null) {\n27 emp.ReceiveCall(call);\n28 } else {\n29 // place the call into queue according to its rank\n30 callQueues[call.rank].add(call);\n31 }\n32 }\n33 void getNextCall(Employee e) {...} // look for call for e\u2019s rank\n34 }\n35 \n36 class Call {\n37 int rank = 0; // minimal rank of employee who can handle this call\n38 public void reply(String message) { ... }\n39 public void disconnect() { ... }\n40 }\n41 \n42 class Employee {\n43 CallHandler callHandler;\n44 int rank; // 0- fresher, 1 - technical lead, 2 - product manager\n45 boolean free;\n46 Employee(int rank) { this.rank = rank; }\n47 void ReceiveCall(Call call) { ... }\n48 void CallHandled(Call call) { ... 
} // call is complete\n49 void CannotHandle(Call call) { // escalate call\n50 call.rank = rank + 1;\n51 callHandler.dispatchCall(call);\n52 free = true;\n53 callHandler.getNextCall(this); // look for waiting call\n54 }\n55 }\n56 \n57 class Fresher extends Employee { \n58 public Fresher() { super(0); }\n59 }\n60 class TechLead extends Employee { \n61 public TechLead() { super(1); } \n62 }\n63 class ProductManager extends Employee { \n64 public ProductManager() { super(2); } \n65 }", "id": 84}, {"answer": "Design a \nchess game using object oriented principles.", "question": "Let\u2019s first understand the basic system components:\n \n\u00bb CD player\n \n\u00bb CD\n \n\u00bb Display () (displays length of song, remaining time and playlist)\nNow, let\u2019s break this down further:\n \n\u00bb Playlist creation (includes add, delete, shunle etc sub functionalities)\n \n\u00bb CD selector\n \n\u00bb Track selector\n \n\u00bb Queueing up a song\n \n\u00bb Get next song from playlist\nA user also can be introduced:\n \n\u00bb Adding\n \n\u00bb Deleting\n \n\u00bb Credit information\nHow do we group this functionality based on Objects (data + functions which go together)?\nObject oriented design suggests wrapping up data with their operating functions in a single \nentity class.\n1 public class CD { }\n2 public class CDPlayer {\n3 private Playlist p;\n4 private CD c;\n5 public Playlist getPlaylist() { return p; }\n6 public void setPlaylist(Playlist p) { this.p = p; }\n7 public CD getCD() { return c; }\n8 public void setCD(CD c) { this.c = c; }\n9 public CDPlayer(Playlist p) { this.p = p; }\n10 public CDPlayer(CD c, Playlist p) { ... }\n11 public CDPlayer(CD c) { this.c = c; }\n12 public void playTrack(Song s) { ... 
}\n13 }\n14 \n15 public class JukeBox {\n\n16 private CDPlayer cdPlayer;\n17 private User user;\n18 private Set<CD> cdCollection;\n19 private TrackSelector ts;\n20 \n21 public JukeBox(CDPlayer cdPlayer, User user, Set<CD> cdCollection,\n22 TrackSelector ts) { ... }\n23 public Song getCurrentTrack() { return ts.getCurrentSong(); }\n24 public void processOneUser(User u) { this.user = u; }\n25 }\n26 \n27 public class Playlist {\n28 private Song track;\n29 private Queue<Song> queue;\n30 public Playlist(Song track, Queue<Song> queue) { ... }\n31 public Song getNextTrackToPlay(){ return queue.peek(); }\n32 public void queueUpTrack(Song s){ queue.add(s); }\n33 }\n34 \n35 public class Song {\n36 private String songName;\n37 }\n38 \n39 public class TrackSelector {\n40 private Song currentSong;\n41 public TrackSelector(Song s) { currentSong=s; }\n42 public void setTrack(Song s) { currentSong = s; }\n43 public Song getCurrentSong() { return currentSong; }\n44 }\n45 \n46 public class User {\n47 private String name;\n48 public String getName() { return name; }\n49 public void setName(String name) { this.name = name; }\n50 public long getID() { return ID; }\n51 public void setID(long iD) { ID = iD; }\n52 private long ID;\n53 public User(String name, long iD) { ... }\n54 public User getUser() { return this; }\n55 public static User addUser(String name, long iD) { ... 
}\n56 }", "id": 86}, {"answer": "Design the data structures for an \nonline book reader system.", "question": "1 public class ChessPieceTurn { };\n2 public class GameManager {\n3 void processTurn(PlayerBase player) { };\n4 boolean acceptTurn(ChessPieceTurn turn) { return true; };\n5 Position currentPosition;\n6 }\n7 \n8 public abstract class PlayerBase {\n9 public abstract ChessPieceTurn getTurn(Position p);\n10 }\n11 class ComputerPlayer extends PlayerBase {\n12 public ChessPieceTurn getTurn(Position p) { return null; }\n13 \n\n14 public PositionEstimator estimater;\n15 public PositionBackTracker backtracter;\n16 }\n17 public class HumanPlayer extends PlayerBase {\n18 public ChessPieceTurn getTurn(Position p) { return null; }\n19 }\n20 \n21 public abstract class ChessPieceBase {\n22 abstract boolean canBeChecked();\n23 abstract boolean isSupportCastle();\n24 }\n25 public class King extends ChessPieceBase { ... }\n26 public class Queen extends ChessPieceBase { ... }\n27 \n28 public class Position { // represents chess positions in compact form\n29 ArrayList<ChessPieceBase> black;\n30 ArrayList<ChessPieceBase> white;\n31 }\n32 \n33 public class PositionBackTracker {\n34 public static Position getNext(Position p) { return null; }\n35 }\n36 public class PositionEstimator {\n37 public static PositionPotentialValue estimate(Position p) { ... }\n38 }\n39 public abstract class PositionPotentialValue {\n40 abstract boolean lessThan(PositionPotentialValue pv);\n41 }", "id": 88}, {"answer": "30 for (User u : users) {\n31 if (u.getID() == ID) return u;\n32 }\n33 return null;\n34 }\n35 \n36 public static void addUser(long ID, String details, \n37 int accountType) {\n38 users.add(new User(ID, details, accountType));\n39 }\n40 \n41 public User(long iD, String details, int accountType) { ... }\n42 }\n43 \n44 public class OnlineReaderSystem {\n45 private Book b;\n46 private User u;\n47 public OnlineReaderSystem(Book b, User u) { ... 
}\n48 public void listenRequest() { }\n49 \n\n50 \n\n51 public void display() { }\n52 }\nThis design is a very simplistic implementation of such a system. We have a class for User to \nkeep all the information regarding the user, and an identifier to identify each user unique-ly. We can add functionality like registering the user, charging a membership amount and \nmonthly / daily quota, etc.\nNext, we have book class where we will keep all the book\u2019s information. We would also im-plement functions like add / delete / update books.\nFinally, we have a manager class for managing the online book reader system which would \nhave a listen function to listen for any incoming requests to log in. It also provides book \nsearch functionality and display functionality. Because the end user interacts through this \nclass, search must be implemented here.", "question": "Since the problem doesn\u2019t describe much about the functionality, let\u2019s assume we want to \ndesign a basic online reading system which provides the following functionality:\n \n\u00bb User membership creation and extension.\n \n\u00bb Searching the database of books\n \n\u00bb Reading the books\nTo implement these we may require many other functions, like get, set, update, etc. Objects \nrequired would likely include User, Book, and Library.\nThe following code / object oriented design describes this functionality:\n1 public class Book {\n2 private long ID;\n3 private String details; \n4 private static Set<Book> books;\n5 \n6 public Book(long iD, String details) { ... 
} \n7 public static void addBook(long iD, String details){\n8 books.add(new Book(iD, details));\n9 }\n10 \n11 public void update() { } \n12 public static void delete(Book b) { books.remove(b); }\n13 \n\n14 for (Book b : books) \n15 if(b.getID() == id) return b;\n16 return null;\n17 }\n18 }\n19 \n20 public class User {\n21 private long ID;\n22 private String details;\n23 private int accountType;\n24 private static Set<User> users;\n25 \n26 \n\n27 public void renewMembership() { ... }\n28 \n29", "id": 90}, {"answer": "1 class Edge {\n2 \n\n3 Piece parent; \n4 Type type; \n5 \n\n6 }\n7 class Piece {\n8 Edge left, right, top, bottom; \n9 Orientation solvedOrientation = ...; // 90, 180, etc\n10 } \n11 class Puzzle {\n12 Piece[][] pieces; /* Remaining pieces left to put away. */ \n13 Piece[][] solution;\n14 \n\n15 /* We\u2019re going to solve this by working our way in-wards, starting\n16 * with the corners. This is a list of the inside edges. */\n17 Edge[] exposed_edges; \n18 \n19 void sort() {\n20 /* Iterate through all edges, adding each to inners, outers,\n21 * etc, as appropriate. Look for the corners\u2014add those to \n22 \n23 * exposed_edges. */\n24 }\n25 \n26 void solve() {\n27 foreach edge1 in exposed_edges {\n28 /* Look for a match to edge1 */\n29 if (edge1.type == Edge.Type.inner) {\n30 foreach edge2 in outers {\n31 \n32 /* We found a match! Remove edge1 from \n33 * exposed_edges. Add edge2\u2019s piece to \n34 * solution. Check which edges of edge2 are\n35 * exposed, and add those to exposed_edges. */\n36 }\n37 }\n38 /* Do the same thing, swapping inner & outer. */\n39 }\n40 } \n\n41 }\n42 }\n1. We grouped the edges by their type. Because inners go with outers, and vice versa, \nthis enables us to go straight to the potential matches.\nWe keep track of the inner perimeter of the puzzle (exposed_edges) as we work our \nway inwards. exposed_edges is initialized to be the corner\u2019s edges.", "question": "Implement a\n jigsaw puzzle. 
Design the data structures and explain an algorithm to \nsolve the puzzle.", "id": 92}, {"answer": "What is our chat server?\nThis is something you should discuss with your interviewer, but let\u2019s make a couple of as-sumptions: imagine we\u2019re designing a basic chat server that needs to support a small num-ber of users. People have a contact list, they see who is online vs online, and they can send \ntext-based messages to them. We will not worry about supporting group chat, voice chat, \netc. We will also assume that contact lists are mutual: I can only talk to you if you can talk to \nme. Let\u2019s keep it simple.\nWhat specific actions does it need to support?\n \n\u00bb User A signs online\n \n\u00bb User A asks for their contact list, with each person\u2019s current status.\n \n\u00bb Friends of User A now see User A as online\n \n\u00bb User A adds User B to contact list\n \n\u00bb User A sends text-based message to User B\n \n\u00bb User A changes status message and/or status type\n \n\u00bb User A removes User B\n \n\u00bb User A signs online\nWhat can we learn about these requirements? \n \nWe must have a concept of users, add request status, online status, and messages. \nWhat are the core components? \nWe\u2019ll need a database to store items and an \u201calways online\u201d application as the server. We \nmight recommend using XML for the communication between the chat server and the \nclients, as it\u2019s easy for a person and a machine to read.\nWhat are the key objects and methods?\nWe have listed the key objects and methods below. 
Note that we have hidden many of the \ndetails, such as how to actually push the data out to a client.\n1 enum StatusType {\n2 \n\n3 }\n4 \n\n5 class Status {\n6 StatusType status_type;\n7 String status_message;\n8 }\n9 \n10 class User {\n11 String username;\n12 String display_name;\n13 User[] contact_list;\n14 AddRequest[] requests;\n15 boolean updateStatus(StatusType stype, String message) { \u2013 };\n16 boolean addUserWithUsername(String name);\n17 boolean approveRequest(String username);\n18 boolean denyRequest(String username);\n19 boolean removeContact(String username);\n20 boolean sendMessage(String username, String message);\n21 }\n22 /* Holds data that from_user would like to add to_user */\n23 class AddRequest {\n24 User from_user;\n25 User to_user;\n26 }\n27 class Server {\n28 User getUserByUsername(String username);\n29 }\nWhat problems would be the hardest to solve (or the most interesting)?", "question": "Explain how you would design a \nchat server. In particular, provide details about the \nvarious backend components, classes, and methods. What would be the hardest \nproblems to solve?", "id": 94}, {"answer": "How do we deal with conflicting information?\n We have some information stored in the computer\u2019s memory and some in the database. \nWhat happens if they get out of sync? Which one is \u201cright\u201d?", "question": "How do we know if someone is online\u2014I mean, really, really know?\n While we would like users to tell us when they sign off, we can\u2019t know for sure. A user\u2019s \nconnection might have died, for example. To make sure that we know when a user has \nsigned off, we might try regularly pinging the client to make sure it\u2019s still there.", "id": 96}, {"answer": "How we do prevent denial of service attacks?\n Clients can push data to us\u2014what if they try to DOS us? 
How do we prevent that?", "question": "How do we make our server scale?\n While we designed out chat server without worrying\u2014too much\u2013 about scalability, in \nreal life this would be a concern. We\u2019d need to split our data across many servers, which \nwould increase our concern about out of sync data.", "id": 98}, {"answer": "Othello has these major steps:\n2. Game () which would be the main function to manage all the activity in the game:\n3. Initialize the game which will be done by constructor\n4. Get first user input\n5. Validate the input\n6. Change board configuration\n7. Check if someone has won the game\n8. Get second user input\n9. Validate the input\n10. \nChange the board configuration\n11. \nCheck if someone has won the game...\nNOTE: \nThe full code for Othello is contained in the code attachment.\n1 public class Question {\n2 \n\n3 \n\n4 private int[][] board;\n5 \n6 /* Sets up the board in the standard othello starting positions, \n7 * and starts the game */\n8 public void start () { ... }\n9 \n10 /* Returns the winner, if any. If there are no winners, returns\n11 * 0 */\n12 private int won() {\n13 if (!canGo (white) && !canGo (black)) {\n14 int count = 0;\n\n15 for (int i = 0; i < 8; i++) {\n16 for (int j = 0; j < 8; j++) {\n17 if (board [i] [j] == white) {\n18 count++;\n19 }\n20 if (board [i] [j] == black) {\n21 count--;\n22 }\n23 }\n24 }\n25 if (count > 0) return white;\n26 if (count < 0) return black;\n27 return 3;\n28 }\n29 return 0;\n30 }\n31 \n32 \n\n33 * move in his turn. This will return false when\n34 * 1. none of his pieces are present\n35 * 2. none of his moves result in him gaining new pieces\n36 \n\n37 */\n38 private boolean canGo(int color) { ... }\n39 \n40 /* Returns if a move at coordinate (x,y) is a valid move for the\n41 \n\n42 private boolean isValid(int color, int x, int y) { ... 
}\n43 \n44 /* Prompts the player for a move and the coordinates for the move.\n45 * Throws an exception if the input is not valid or if the entered \n46 * coordinates do not make a valid move. */\n47 private void getMove (int color) throws Exception { ... }\n48 \n49 /* Adds the move onto the board, and the pieces gained from that \n50 * move. Assumes the move is valid. */\n51 private void add (int x, int y, int color) { ... }\n52 \n53 /* The actual game: runs continuously until a player wins */\n54 private void game() {\n55 printBoard();\n56 while (won() == 0) {\n57 boolean valid = false;\n58 while (!valid) {\n59 try {\n60 getMove(black);\n\n61 valid = true;\n62 } catch (Exception e) {\n63 System.out.println (\u201cEnter a valid coordinate!\u201d);\n64 }\n65 }\n66 valid = false;\n67 printBoard();\n68 while (!valid) {\n69 try {\n70 getMove(white);\n71 valid = true;\n72 } catch (Exception e) {\n73 System.out.println (\u201cEnter a valid coordinate!\u201d);\n74 }\n75 }\n76 printBoard ();\n77 }\n78 \n79 if (won()!=3) {\n80 System.out.println (won () == 1 ? \u201cwhite\u201d : \u201cblack\u201d + \n81 \u201c won!\u201d);\n82 } else {\n83 System.out.println(\u201cIt\u2019s a draw!\u201d);\n84 }\n85 }\n86 }", "question": "Othello is played as follows: Each Othello piece is white on one side and black on the \nother. When a piece is surrounded by its opponents on both the left and right sides, \nor both the top and bottom, it is said to be captured and its color is flipped. On your \nturn, you must capture at least one of your opponent\u2019s pieces. The game ends when \neither user has no more valid moves, and the win is assigned to the person with the \nmost pieces. 
Implement the object oriented design for \nOthello.", "id": 100}, {"answer": "For data block allocation, we can use bitmask vector and linear search (see \u201cPractical File \nSystem Design\u201d) or B+ \ntrees (see Wikipedia).\n1 struct DataBlock { char data[DATA_BLOCK_SIZE]; };\n2 DataBlock dataBlocks[NUM_DATA_BLOCKS];\n3 struct INode { std::vector<int> datablocks; };\n4 struct MetaData {\n5 int size;\n6 Date last_modifed, created;\n7 char extra_attributes;\n8 };\n9 std::vector<bool> dataBlockUsed(NUM_DATA_BLOCKS);\n10 std::map<string, INode *> mapFromName;\n11 struct FSBase;\n12 struct File : public FSBase {\n13 private:\n14 std::vector<INode> * nodes;\n15 MetaData metaData;\n16 };\n17 \n18 struct Directory : pubic FSBase { std::vector<FSBase* > content; };\n19 struct FileSystem {\n20 init();\n21 mount(FileSystem*);\n22 unmount(FileSystem*);\n23 File createFile(cosnt char* name) { ... }\n24 Directory createDirectory(const char* name) { ... }\n25 \n\n26", "question": "Explain the data structures and algorithms that you would use to design an \nin-mem-ory file system. Illustrate with an example in code where possible.", "id": 102}, {"answer": "Describe the \ndata structures and algorithms that you would use to implement a gar-bage collector in C++.", "question": "27 \n\n28 \n\n29 \n\n30 int position) { ... }\n31 };", "id": 104}, {"answer": "Write a method to generate the nth Fibonacci number.", "question": "In C++, garbage collection with reference counting is almost always implemented with \nsmart pointers, which perform reference counting. The main reason for using smart pointers \nover raw ordinary pointers is the conceptual simplicity of implementation and usage. \nWith smart pointers, everything related to garbage collection is performed behind the \nscenes - typically in constructors / destructors / assignment operator / explicit object man-agement functions. 
\nThere are two types of functions, both of which are very simple:\n1 RefCountPointer::type1() {\n2 /* implementation depends on reference counting organisation. \n3 * There can also be no ref. counter at all (see approach #4) */\n4 incrementRefCounter(); }\n5 \n6 RefCountPointer::type2() {\n7 /* Implementation depends on reference counting organisation. \n8 * There can also be no ref. counter at all (see approach #4). */\n9 decrementRefCounter();\n10 if (referenceCounterIsZero()) {\n11 destructObject();\n12 }\n13 }\nThere are several approaches for reference counting implementation in C++:\n1. Simple reference counting.\n1 struct Object { };\n2 struct RefCount {\n3 int count;\n4 };\n5 struct RefCountPtr {\n6 Object * pointee;\n7 RefCount * refCount;\n8 };\nAdvantages: performance.\nDisadvantages: memory overhead because of two pointers.\n2. Alternative reference counting.\n\n1 struct Object { \u2013 };\n2 struct RefCountPtrImpl {\n3 int count;\n4 Object * object;\n5 };\n6 struct RefCountPtr {\n7 RefCountPtrImpl * pointee;\n8 };\nAdvantages: no memory overhead because of two pointers.\nDisadvantages: performance penalty because of extra level of indirection.\n3. Intrusive reference counting.\n1 struct Object { \u2013 };\n2 struct ObjectIntrusiveReferenceCounting {\n3 Object object;\n4 int count;\n5 };\n6 struct RefCountPtr {\n7 ObjectIntrusiveReferenceCounting * pointee;\n8 };\nAdvantages: no previous disadvantages.\nDisadvantages: class for intrusive reference counting should be modified.\n4. Ownership list reference counting. It is an alternative for approach 1-3. For 1-3 it is only \nimportant to determine that counter is zero\u2014its actual value is not important. This is the \nmain idea of approach # 4. \nAll Smart-Pointers for given objects are stored in doubly-linked lists. The constructor of a \nsmart pointer adds the new node to a list, and the destructor removes a node from the list \nand checks if the list is empty or not. 
If it is empty, the object is deleted.\n1 struct Object { };\n2 struct ListNode {\n3 Object * pointee;\n4 ListNode * next;\n5 }", "id": 106}, {"answer": "Imagine a robot sitting on the upper left hand corner of an NxN grid. The robot can \nonly move in two directions: right and down. How many \npossible paths are there for \nthe robot?\nFOLLOW UP\nImagine certain squares are \u201coff limits\u201d, such that the robot can not step on them. \nDesign an algorithm to get all possible paths for the\n robot.", "question": "There are three potential approaches: (1) recursive approach (2) iterative approach (3) using \nmatrix math. We have described the recursive and iterative approach below, as you would \nnot be expected to be able to derive the \nmatrix-based approach in an interview. For the \ninterested math-geeks, you may read about the (most e\u02ddcient) matrix-based algorithm at \nhttp://en.wikipedia.org/wiki/Fibonacci_number#Matrix_form.\nRecursive Solution:\n1 \n2 if (n == 0) {\n3 return 0; // f(0) = 0\n4 } else if (n == 1) {\n5 return 1; // f(1) = 1\n6 } else if (n > 1) {\n7 \n8 } else {\n9 return \u20131; // Error condition\n10 }\n11 }\nIterative Solution:\n1 \n2 if (n < 0) return -1; // Error condition.\n3 if (n == 0) return 0;\n4 int a = 1, b = 1;\n5 for (int i = 3; i <= n; i++) {\n6 int c = a + b;\n7 a = b;\n8 b = c;\n9 } \n10 return b;\n11 }", "id": 108}, {"answer": "Write a method that returns all subsets of a set.", "question": "Part 1:\n (For clarity, we will solve this part assuming an X by Y grid)\nEach path has (X-1)+(Y-1) steps. Imagine the following paths: X X Y Y X (move right -> right -> down -> down -> right) X Y X Y X (move right -> down -> right -> down -> right) ...\nEach path can be fully represented by the moves at which we move right. 
That is, if I were to \nask you which path you took, you could simply say \u201cI moved right on step 3 and 4.\u201d\nSince you must always move right X-1 times, and you have X-1 + Y-1 total steps, you have \nto pick X-1 times to move right out of X-1+Y-1 choices. Thus, there are C(X-1, X-1+Y-1) paths \n(e.g., X-1+Y-1 choose X-1): (X-1 + Y-1)! / ((X-1)! * (Y-1)!)\nPart 2: Code\nWe can implement a simple recursive algorithm with backtracking:\n1 ArrayList<Point> current_path = new ArrayList<Point>();\n2 public static boolean getPaths(int x, int y) {\n3 Point p = new Point(x, y);\n4 current_path.add(p);\n5 if (0 == x && 0 == y) return true; // current_path\n6 boolean success = false;\n7 if (x >= 1 && is_free(x - 1, y)) { // Try right\n8 success = getPaths(x - 1, y); // Free! Go right\n9 }\n10 if (!success && y >= 1 && is_free(x, y - 1)) { // Try down\n11 success = getPaths(x, y - 1); // Free! Go down\n12 }\n13 if (!success) {\n14 current_path.remove(p); // Wrong way! \n15 }\n16 return success;\n17 }", "id": 110}, {"answer": "Write a method to compute all permutations of a string", "question": "We should first have some reasonable expectations of our time and space complexity. How \nmany subsets of a set are there? We can compute this by realizing that when we generate a \nsubset, each element has the \u201cchoice\u201d of either being in there or not. That is, for the first ele-ment, there are 2 choices. For the second, there are two, etc. So, doing 2 * 2 * ... * 2 n times \ngives us 2^n subsets. We will not be able to do better than this in time or space complexity.\nApproach #1: Recursion\nThis is a great problem to implement with recursion since we can build all subsets of a set us-ing all subsets of a smaller set. Specifically, given a set S, we can do the following recursively:\n \n\u00bb Let first = S[0]. Let smallerSet = S[1, ..., n]. 
\n \n\u00bb Compute all subsets of smallerSet and put them in allsubsets.\n \n\u00bb For each subset in allsubsets, clone it and add first to the subset.\nThe following code implements this algorithm:\n1 ArrayList<ArrayList<Integer>> getSubsets(ArrayList<Integer> set, \n2 int index) {\n3 ArrayList<ArrayList<Integer>> allsubsets;\n4 if (set.size() == index) {\n5 allsubsets = new ArrayList<ArrayList<Integer>>();\n6 allsubsets.add(new ArrayList<Integer>()); // Empty set\n7 } else {\n8 allsubsets = getSubsets(set, index + 1);\n9 int item = set.get(index);\n10 ArrayList<ArrayList<Integer>> moresubsets = \n11 new ArrayList<ArrayList<Integer>>();\n12 for (ArrayList<Integer> subset : allsubsets) {\n13 ArrayList<Integer> newsubset = new ArrayList<Integer>();\n14 newsubset.addAll(subset); //\n15 newsubset.add(item);\n16 moresubsets.add(newsubset);\n17 }\n18 allsubsets.addAll(moresubsets);\n19 }\n20 return allsubsets;\n21 }\nApproach #2: \nCombinatorics\n \n\u00bb When we\u2019re generating a set, we have two choices for each element: (1) the element is \n\nin the set (the \u201cyes\u201d state) or (2) the element is not in the set (the \u201cno\u201d state). This means \nthat each subset is a sequence of yesses / nos\u2014e.g., \u201cyes, yes, no, no, yes, no\u201d\n \n\u00bb This gives us 2\n^n possible subsets. How can we iterate through all possible sequences \nof \u201cyes\u201d / \u201cno\u201d states for all elements? If each \u201cyes\u201d can be treated as a 1 and each \u201cno\u201d can \nbe treated as a 0, then each subset can be represented as a binary string.\n \n\u00bb Generating all subsets then really just comes down to generating all binary numbers \n(that is, all integers). 
Easy!\n1 ArrayList<ArrayList<Integer>> getSubsets2(ArrayList<Integer> set) {\n2 ArrayList<ArrayList<Integer>> allsubsets = \n3 new ArrayList<ArrayList<Integer>>();\n4 int max = 1 << set.size(); \n5 for (int i = 0; i < max; i++) {\n6 ArrayList<Integer> subset = new ArrayList<Integer>();\n7 int k = i;\n8 int index = 0;\n9 while (k > 0) {\n10 if ((k & 1) > 0) {\n11 subset.add(set.get(index));\n12 }\n13 k >>= 1;\n14 index++;\n15 }\n16 allsubsets.add(subset);\n17 }\n18 return allsubsets;\n19 }", "id": 112}, {"answer": "Implement an algorithm to print all valid (e.g., properly opened and closed) combi-nations of n-pairs of parentheses.\nEXAMPLE:\ninput: 3 (e.g., 3 pairs of parentheses)\noutput: ()()(), ()(()), (())(), ((()))", "question": "Let\u2019s assume a given string S represented by the letters A1, A2, A3, ..., An\nTo permute set S, we can select the first character, A1, permute the remainder of the string to \nget a new list. Then, with that new list, we can \u201cpush\u201d A1 into each possible position.\nFor example, if our string is \u201cabc\u201d, we would do the following:\n1. Let first = \u201ca\u201d and let remainder = \u201cbc\u201d\n2. Let list = permute(bc) = {\u201cbc\u201d, \u201ccd\u201d}\n3. Push \u201ca\u201d into each location of \u201cbc\u201d (--> \u201cabc\u201d, \u201cbac\u201d, \u201cbca\u201d) and \u201ccb\u201d (--> \u201cacb\u201d, \u201ccab\u201d, \u201ccba\u201d)\n4. 
Return our new list\nNow, the code to do this:\n1 public static ArrayList<String> getPerms(String s) {\n2 ArrayList<String> permutations = new ArrayList<String>();\n3 if (s == null) { // error case\n4 return null;\n5 } else if (s.length() == 0) { // base case\n6 permutations.add(\u201c\u201d);\n7 return permutations;\n8 }\n9 \n10 \n\n11 \n12 ArrayList<String> words = getPerms(remainder);\n13 for (String word : words) {\n14 for (int j = 0; j <= word.length(); j++) {\n15 \n16 }\n17 }\n18 return permutations;\n19 }\n20 \n21 public static String insertCharAt(String word, char c, int i) {\n22 String start = word.substring(0, i);\n23 String end = word.substring(i);\n24 return start + c + end;\n25 }\nThis solution takes\n O(n!) time, since there are n! permutations.", "id": 114}, {"answer": "Implement the \u201cpaint fill\u201d function that one might see on many image editing pro-grams. That is, given a screen (represented by a 2-dimensional array of Colors), a \npoint, and a new color, fill in the surrounding area until you hit a border of that color.", "question": "We can solve this problem recursively by recursing through the string. On each iteration, we \nhave the index for a particular character in the string. We need to select either a left or a right \nparen. When can we use left, and when can we use a right paren?\n \n\u00bb Left: As long as we haven\u2019t used up all the left parentheses, we can always insert a left \nparen.\n \n\u00bb Right: We can insert a right paren as long as it won\u2019t lead to a syntax error. When will we \nget a syntax error? We will get a syntax error if there are more right parentheses than \nleft.\nSo, we simply keep track of the number of left and right parentheses allowed. If there are \nleft parens remaining, we\u2019ll insert a left paren and recurse. 
If there are more right parens \nremaining than left (eg, if there are more left parens used), then we\u2019ll insert a right paren and \nrecurse.\n1 public static void printPar(int l, int r, char[] str, int count) {\n2 if (l < 0 \n3 if (l == 0 && r == 0) {\n4 System.out.println(str); // found one, so print it\n5 } else {\n6 if (l > 0) { // try a left paren, if there are some available\n7 str[count] = \u201a(\u201a;\n8 printPar(l - 1, r, str, count + 1);\n9 }\n10 if (r > l) { // try a right paren, if there\u2019s a matching left\n11 str[count] = \u201a)\u2019;\n12 printPar(l, r - 1, str, count + 1);\n13 }\n14 } \n15 }\n16 \n17 public static void printPar(int count) {\n18 char[] str = new char[count*2];\n19 printPar(count, count, str, 0);\n20 }", "id": 116}, {"answer": "Given an infinite number of quarters (25 cents), dimes (10 cents), nickels (5 cents) and \npennies (1 cent), write code to calculate the number of ways of representing n cents.", "question": "First, let\u2019s visualize how this method works. When we call Paint Fill (eg, \u201cclick\u201d paint fill in the \nimage editing application) on, say, a green pixel, we want to \u201cbleed\u201d outwards. Pixel by pixel, \nwe expand outwards calling PaintFill on the surrounding pixel. When we hit a pixel that is \nnot green, we stop. 
Surrounding green pixels may still be painted if they are touched by \nanother Paint Fill operation.\nWe can implement this algorithm recursively:\n1 enum Color {\n2 Black, White, Red, Yellow, Green\n3 }\n4 boolean PaintFill(Color[][] screen, int x, int y, Color ocolor, \n5 Color ncolor) {\n6 if (x < 0 \n7 y < 0 \n8 return false;\n9 }\n10 if (screen[y][x] == ocolor) {\n11 screen[y][x] = ncolor;\n12 PaintFill(screen, x - 1, y, ocolor, ncolor); // left\n13 PaintFill(screen, x + 1, y, ocolor, ncolor); // right\n14 PaintFill(screen, x, y - 1, ocolor, ncolor); // top\n15 PaintFill(screen, x, y + 1, ocolor, ncolor); // bottom\n16 }\n17 return true;\n18 }\n19 \n20 boolean PaintFill(Color[][] screen, int x, int y, Color ncolor) {\n21 return PaintFill(screen, x, y, screen[y][x], ncolor);\n22 }", "id": 118}, {"answer": "Write an algorithm to print all ways of arranging eight queens on a chess board so \nthat none of them share the same row, column or diagonal.", "question": "This is a recursive problem, so let\u2019s figure out how to do makeChange(n) using prior solutions \n(i.e., sub-problems). Let\u2019s say n = 100, so we want to compute the number of ways of making \nchange of 100 cents. What\u2019s the relationship to its sub-problems?\nWe know that makeChange(100):\n= makeChange(100 using 0 quarters) + makeChange(100 using 1 quarter) + makeChange(100 \nusing 2 quarter) + makeChange(100 using 3 quarter) + makeChange(100 using 4 quarter)\nCan we reduce this further? Yes!\n= makeChange(100 using 0 quarters) + makeChange(75 using 0 quarter) + makeChange(50 \nusing 0 quarters) + makeChange(25 using 0 quarters) + 1\nNow what? 
We\u2019ve used up all our quarters, so now we can start applying our next biggest \ndenomination: dimes.\nThis leads to a recursive algorithm that looks like this:\n1 public static int makeChange(int n, int denom) {\n2 int next_denom = 0;\n3 switch (denom) {\n4 case 25:\n5 next_denom = 10;\n6 break;\n7 case 10:\n8 next_denom = 5;\n9 break;\n10 case 5:\n11 next_denom = 1;\n12 break;\n13 case 1:\n14 return 1;\n15 }\n16 int ways = 0;\n17 for (int i = 0; i * denom <= n; i++) {\n18 ways += makeChange(n - i * denom, next_denom);\n19 }\n20 return ways;\n21 }\n22 \n23 System.out.writeln(makeChange(n, 25));", "id": 120}, {"answer": "You are given two sorted arrays, A and B, and A has a large enough buffer at the end \nto hold B. Write a method to merge B into A in sorted order.", "question": "We will use a backtracking algorithm. For each row, the column where we want to put the \nqueen is based on checking that it does not violate the required condition.\n1. For this, we need to store the column of the queen in each row as soon as we have finalized \nit. Let ColumnForRow[] be the array which stores the column number for each row.\n2. 
The checks that are required for the three given conditions are:\n \n\u00bb On same Column : ColumnForRow[i] == ColumnForRow[j]\n \n\u00bb On same Diagonal: (ColumnForRow[i] - ColumnForRow[j] ) == ( i- j) or \n \n (ColumnForRow[j] - ColumnForRow[i]) == (i - j)\n1 int columnForRow[] = new int [8];\n2 boolean check(int row) {\n3 for (int i = 0; i < row; i++) {\n4 int diff = Math.abs(columnForRow[i] - columnForRow[row]);\n5 if (diff == 0 \n6 }\n7 return true;\n8 }\n9 \n10 void PlaceQueen(int row){\n11 if (row == 8) {\n12 printBoard();\n13 return; \n14 }\n15 for (int i = 0; i < 8; i++) {\n16 columnForRow[row]=i;\n17 if(check(row)){\n18 PlaceQueen(row+1);\n19 }\n20 }\n21 }", "id": 122}, {"answer": "Write a method to sort an array of strings so that all the anagrams are next to each \nother.", "question": "This code is a part of the standard merge-sort code. We merge A and B from the back, by \ncomparing each element.\n1 public static void merge(int[] a, int[] b, int n, int m) {\n2 int k = m + n - 1; // Index of last location of array b\n3 int i = n - 1; // Index of last element in array b\n4 int j = m - 1; // Index of last element in array a\n5 \n6 // Start comparing from the last element and merge a and b\n7 while (i >= 0 && j >= 0) {\n8 if (a[i] > b[j]) {\n9 a[k--] = a[i--];\n10 } else {\n11 a[k--] = b[j--];\n12 }\n13 }\n14 while (j >= 0) {\n15 a[k--] = b[j--];\n16 }\n17 }\nNote: You don\u2019t need to copy the contents of a after running out of b\u2019s. They are \nalready in place.", "id": 124}, {"answer": "Given a \nsorted array of n integers that has been rotated an unknown number of \ntimes, give an\n O(log n) algorithm that finds an element in the array. 
You may assume \nthat the array was originally sorted in increasing order.\nEXAMPLE:\nInput: find 5 in array (15 16 19 20 25 1 3 4 5 7 10 14)\nOutput: 8 (the index of 5 in the array)", "question": "The basic idea is to implement a normal sorting algorithm where you override the com-pareTo method to compare the \u201csignature\u201d of each string. In this case, the signature is the \nalphabetically sorted string.\n1 public class AnagramComparator implements Comparator<String> {\n2 public String sortChars(String s) {\n3 char[] content = s.toCharArray();\n4 Arrays.sort(content);\n5 return new String(content);\n6 }\n7 \n8 public int compare(String s1, String s2) {\n9 return sortChars(s1).compareTo(sortChars(s2));\n10 }\n11 }\nNow, just sort the arrays, using this compareTo method instead of the usual one.\n12 Arrays.sort(array, new AnagramComparator());", "id": 126}, {"answer": "If you have a 2 GB file with one string per line, which \nsorting algorithm would you use \nto sort the file and why?", "question": "We can do this with a modification of binary \nsearch.\n1 public static int search(int a[], int l, int u, int x) { \n2 while (l <= u) { \n3 int m = (l + u) / 2; \n4 if (x == a[m]) {\n5 return m; \n6 } else if (a[l] <= a[m]) {\n7 if (x > a[m]) {\n8 l = m+1; \n9 } else if (x >=a [l]) {\n10 u = m-1; \n11 } else {\n12 l = m+1;\n13 }\n14 } \n15 else if (x < a[m]) u = m-1; \n16 else if (x <= a[u]) l = m+1; \n17 else u = m - 1; \n18 }\n19 return -1; \n20 }\n21 \n22 public static int search(int a[], int x) {\n23 return search(a, 0, a.length - 1, x);\n24 }\nWhat about duplicates? You may observe that the above function doesn\u2019t give you an ef-ficient result in case of duplicate elements. However, if your array has duplicate entries then \nwe can\u2019t do better than O(n) which is as good as linear search. 
\nFor example, if the array is [2,2,2,2,2,2,2,2,3,2,2,2,2,2,2,2,2,2,2], there is no way to find element \n3 until you do a linear search.", "id": 128}, {"answer": "Given a sorted array of strings which is interspersed with empty strings, write a meth-od to \nfind the location of a given string. \nExample: find \u201cball\u201d in [\u201cat\u201d, \u201c\u201d, \u201c\u201d, \u201c\u201d, \u201cball\u201d, \u201c\u201d, \u201c\u201d, \u201ccar\u201d, \u201c\u201d, \u201c\u201d, \u201cdad\u201d, \u201c\u201d, \u201c\u201d] will return 4\n \nExample: find \u201cballcar\u201d in [\u201cat\u201d, \u201c\u201d, \u201c\u201d, \u201c\u201d, \u201c\u201d, \u201cball\u201d, \u201ccar\u201d, \u201c\u201d, \u201c\u201d, \u201cdad\u201d, \u201c\u201d, \u201c\u201d] will return -1", "question": "When an interviewer gives a size limit of 2GB, it should tell you something - in this case, it \nsuggests that they don\u2019t want you to bring all the data into memory.\nSo what do we do? We only bring part of the data into memory..\nAlgorithm:\nHow much memory do we have available? Let\u2019s assume we have X MB of memory available.\n1. Divide the file into K chunks, where X * K = 2 GB. Bring each chunk into memory and \nsort the lines as usual using any \nO(n log n) algorithm. Save the lines back to the file.\n2. Now bring the next chunk into memory and sort.\n3. Once we\u2019re done, merge them one by one. \nThe above algorithm is also known as external sort. Step 3 is known as N-way merge\nThe rationale behind using external sort is the size of data. 
Since the data is too huge and we \ncan\u2019t bring it all into memory, we need to go for a disk based sorting algorithm.", "id": 130}, {"answer": "Given a \nmatrix in which each row and each column is sorted, write a method to \nfind \nan element in it.", "question": "Use ordinary binary search, but when you hit an empty string, advance to the next non-empty string; if there is no next non-empty string, search the left half.\n1 public int search(\n2 \n\n3 // Ensure there is something at the end\n4 \n5 --last;\n6 }\n7 \n8 return -1; // this block was empty, so fail\n9 }\n10 \n11 while (strings[mid] == \u201c\u201d) {\n12 \n13 }\n14 int r = strings[mid].compareTo(str);\n15 if (r == 0) return mid;\n16 if (r < 0) {\n17 \n18 } else {\n19 last = mid - 1;\n20 }\n21 }\n22 return -1;\n23 }\n24 \n25 public int search(String[] strings, String str) {\n26 if (strings == null \n27 if (str == \u201c\u201d) {\n28 for (int i = 0; i < strings.length; i++) {\n29 if (strings[i] == \u201c\u201d) return i;\n30 }\n31 return -1;\n32 }\n33 return search(strings, str, 0, strings.length - 1);\n34 }", "id": 132}, {"answer": "A circus is designing a tower routine consisting of people standing atop one anoth-er\u2019s shoulders. For practical and aesthetic reasons, each person must be both shorter \nand lighter than the person below him or her. Given the heights and weights of each \nperson in the circus, write a method to compute the largest possible number of peo-ple in such a tower.\nEXAMPLE:\nInput (ht, wt): (65, 100) (70, 150) (56, 90) (75, 190) (60, 95) (68, 110)\nOutput: The longest tower is length 6 and includes from top to bottom: (56, 90) \n(60,95) (65,100) (68,110) (70,150) (75,190)", "question": "Assumptions:\n \n\u00bb Rows are sorted left to right in ascending order. Columns are sorted top to bottom in \nascending order.\n \n\u00bb Matrix is of size MxN.\nThis algorithm works by elimination. 
Every move to the left (--col) eliminates all the elements \nbelow the current cell in that column. Likewise, every move down eliminates all the elements \nto the left of the cell in that row.\n1 boolean FindElem(int[][] mat, int elem, int M, int N) {\n2 int row = 0;\n3 int col = N-1; \n4 while (row < M && col >= 0) {\n5 if (mat[row][col] == elem) {\n6 return true;\n7 } else if (mat[row][col] > elem) {\n8 col--;\n9 } else {\n10 row++; \n11 }\n12 } \n13 return false; \n14 }", "id": 134}, {"answer": "You have a basketball hoop and someone says that you can play 1 of 2 games.\nGame #1: You get one shot to make the hoop.\nGame #2: You get three shots and you have to make 2 of 3 shots.\nIf p is the \nprobability of making a particular shot, for which values of p should you pick \none game or the other?", "question": "Step 1. Sort all items by height first, and then by weight. This means that if all the heights are \nunique, then the items will be \nsorted by their height. If heights are the same, items will be \nsorted by their weight. \nExample:\n \n\u00bb Before sorting: (60, 100) (70, 150) (56, 90) (75, 190) (60, 95) (68,110).\n \n\u00bb After sorting: (56, 90), (60, 95), (60,100), (68, 110), (70,150), (75,190).\nStep 2. Find the longest sequence which contains increasing heights and increasing \nweights. \nTo do this, we:\n \na) Start at the beginning of the sequence. 
Currently, max_sequence is empty.\n \nb) If, for the next item, the height and the weight is not greater than those of the previous \nitem, we mark this item as \u201cunfit\u201d .\n \n(60,95)\n(65,100)\n(75,80)\n(80, 100)\n(unfit item)\n \nc) If the sequence found has more items than \u201cmax sequence\u201d, it becomes \u201cmax sequence\u201d.\n \nd) After that the search is repeated from the \u201cunfit item\u201d, until we reach the end of the origi-nal sequence.\n1 public class Question {\n2 ArrayList<HtWt> items;\n3 ArrayList<HtWt> lastFoundSeq;\n4 ArrayList<HtWt> maxSeq;\n5 \n\n6 // Returns longer sequence\n7 ArrayList<HtWt> seqWithMaxLength(ArrayList<HtWt> seq1,\n8 ArrayList<HtWt> seq2) {\n9 return seq1.size() > seq2.size() ? seq1 : seq2;\n10 }\n11 \n12 \n\n13 \n\n14 \n15 if (startFrom < items.size()) {\n16 for (int i = 0; i < items.size(); i++) {\n17 HtWt item = items.get(i);\n18 if (i == 0 \n19 seq.add(item);\n20 } else {\n21 \n22 }\n23 }\n24 }\n25 \n26 }\n27 \n28 // Find the maximum length sequence\n29 \n\n30 Collections.sort(items);\n31 \n32 \n33 ArrayList<HtWt> nextSeq = new ArrayList<HtWt>();\n34 \n35 maxSeq = seqWithMaxLength(maxSeq, nextSeq);\n36 \n37 \n38 }\n39 } \n40 }", "id": 136}, {"answer": "There are three ants on different vertices of a triangle. What is the probability of colli-sion (between any two or all of them) if they start walking on the sides of the triangle?\nSimilarly find the \nprobability of collision with \u201an\u2019 ants on an \u201an\u2019 vertex polygon.", "question": "Probability of winning Game 1: p\nProbability of winning Game 2:\nLet s(k,n) be the probability of making exactly k shots out of n. The probability of win-ning game 2 is s(2, 3)+s(3, 3). 
Since, s(k, n) = C(n, k) ( 1- p)^(n - k) p^k, the probability of \nwinning is 3 * (1 - p) * p^2 + p^3.\nSimplified, it becomes 3 * p^2 - 2 * p^3.\nYou should play Game1 if P(Game1) > P(Game2):p > 3*p^2 - 2*p^3.1 > 3*p - 2*p^22*p^2 - 3*p + 1 > 0(2p - 1)(p - 1) > 0\nBoth terms must be positive or both must be negative. But we know p < 1, so (p - 1) < 0. This \nmeans both terms must be negative.(2p - 1) < 02p < 1p < .5\nSo, we should play Game1 if p < .5.", "id": 138}, {"answer": "Given two lines on a Cartesian plane, determine whether the two lines would inter-sect.", "question": "None of the three ants will collide if all three are moving in clockwise direction, or all three \nare moving in a counter-clockwise direction. Otherwise, there will definitely be a collision.\nHow many ways are there for the three ants to move? Each ant can move in 2 directions, so \nthere are 2^3 ways the ant can move. There are only two ways which will avoid a collision, \ntherefore the probability of collision is (2^3 \u2013 2) / (2^3) = 6 / 8 = 3 / 4.\nTo generalize this to an n-vertex polygon: there are still only 2 ways in which the ants can \nmove to avoid a collision, but there are 2^n ways they can move total. Therefore, in general, \nprobability of collision is (2^n \u2013 2) / 2^n = 1 \u2013 1/2^(n-1).", "id": 140}, {"answer": "Write a method to implement *, - , / operations. You should use only the + operator.", "question": "There are a lot of unknowns in this problem (what format are the lines in? 
What if they are the \nsame line?), but let\u2019s\n assume:\n \n\u00bb If two lines are the same (same line = same slope and y-intercept), they are considered \nto intersect.\n \n\u00bb We get to decide the data structure.\n1 public class Line {\n2 static double epsilon = 0.000001;\n3 public double slope;\n4 public double yintercept;\n5 \n6 public Line(double s, double y) {\n7 slope = s;\n8 yintercept = y;\n9 }\n10 \n11 public boolean intersect(Line line2) {\n12 return Math.abs(slope - line2.slope) > epsilon \n13 Math.abs(yintercept - line2.yintercept) < epsilon;\n14 }\n15 }\n \n\u00bb Ask questions. This question has a lot of unknowns\u2014ask questions to clarify them. Many \ninterviewers intentionally ask vague questions to see if you\u2019ll clarify your assumptions.\n \n\u00bb When possible, design and use data structures. It shows that you understand and care \nabout object oriented design.\n \n\u00bb Think through which data structures you design to represent a line. There are a lot of \noptions, with lots of trade offs. Pick one and explain your choice.\n \n\u00bb Don\u2019t assume that the slope and y-intercept are integers.\n \n\u00bb Understand limitations of floating point representations. Never check for equality with \n==.", "id": 142}, {"answer": "Given two squares on a two dimensional plane, find a line that would cut these two \nsquares in half.", "question": "With an understanding of what each operation (minus, times, divide) does, this problem can \nbe approached logically.\n \n\u00bb Subtraction should be relatively straightforward, as we all know that a - b is the same \nthing as a + (-1)*b.\n \n\u00bb Multiplication: we have to go back to what we learned in grade school: 21 * 3 = 21 + 21 \n+ 21. 
It\u2019s slow, but it works.\n \n\u00bb Division is the trickiest, because we usually think of 21 / 3 as something like \u201cif you divide \na 21 foot board into 3 pieces, how big is each piece?\u201d If we think about it the other way \naround, it\u2019s a little easier: \u201cI divided a 21 foot board in x pieces and got pieces of 3 feet \neach, how many pieces were there?\u201d From here, we can see that if we continuously sub-tract 3 feet from 21 feet, we\u2019ll know how many pieces there are. \nThat is, we continuously \nsubtract b from a and count how many times we can do that.\n1 /* Flip a positive sign to negative, or a negative sign to pos */\n2 public static int FnNegate(int a) {\n3 int neg = 0;\n4 int d = a < 0 ? 1 : -1;\n5 while (a != 0) {\n6 neg += d;\n7 a += d;\n8 }\n9 return neg;\n10 }\n11 \n12 /* Subtract two numbers by negating b and adding them */\n13 public static int FnMinus(int a, int b) {\n14 return a + FnNegate(b);\n15 }\n16 \n17 /* Check if a and b are different signs */\n18 public static boolean DifferentSigns(int a, int b) {\n19 return ((a < 0 && b > 0) \n20 }\n21 \n22 /* Return absolute value */\n23 public static int abs(int a) {\n24 if (a < 0) return FnNegate(a);\n25 else return a;\n26 }\n\n27 \n28 /* Multiply a by b by adding a to itself b times */\n29 public static int FnTimes(int a, int b) {\n30 if (a < b) return FnTimes(b, a); // algo is faster if b < a\n31 int sum = 0;\n32 for (int iter = abs(b); iter > 0; --iter) sum += a;\n33 if (b < 0) sum = FnNegate(sum);\n34 return sum;\n35 }\n36 \n37 /* Divide a by b by literally counting how many times does b go into\n38 * a. That is, count how many times you can subtract b from a until\n39 * you hit 0. 
*/\n40 public static int FnDivide(int a, int b) throws \n41 java.lang.ArithmeticException {\n42 if (b == 0) {\n43 throw new java.lang.ArithmeticException(\u201cDivide by 0.\u201d);\n44 }\n45 int quotient = 0;\n46 int divisor = FnNegate(abs(b));\n47 int divend; /* dividend */\n48 for (divend = abs(a); divend >= abs(divisor); divend += divisor) {\n49 ++quotient;\n50 }\n51 if (DifferentSigns(a, b)) quotient = FnNegate(quotient);\n52 return quotient;\n53 }\n \n\u00bb A logical approach of going back to what exactly multiplication and division do comes \nin handy. Remember that. All (good) interview problems can be approached in a logi-cal, methodical way!\n \n\u00bb The interviewer is looking for this sort of logical work-your-way-through-it approach.\n \n\u00bb This is a great problem to demonstrate your ability to write clean code\u2014specifically, \nto show your ability to re-use code. For example, if you were writing this solution and \ndidn\u2019t put FnNegate in its own method, you should move it out once you see that you\u2019ll \nuse it multiple times.\n \n\u00bb Be careful about making assumptions while coding. Don\u2019t assume that the numbers are \nall positive, or that a is bigger than b.", "id": 144}, {"answer": "Given a two dimensional graph with points on it, find a line which passes the most \nnumber of points.", "question": "Any line that goes through the center of a rectangle must cut it in half. 
Therefore, if you drew \na line connecting the centers of the two squares, it would cut both in half.\n1 public class Square {\n2 public double left;\n3 public double top;\n4 public double bottom;\n5 public double right;\n6 public Square(double left, double top, double size) {\n7 this.left = left;\n8 this.top = top;\n9 this.bottom = top + size;\n10 this.right = left + size;\n11 }\n12 \n13 public Point middle() {\n14 return new Point((this.left + this.right) / 2, \n15 (this.top + this.bottom) / 2);\n16 }\n17 \n18 public Line cut(Square other) {\n19 Point middle_s = this.middle();\n20 Point middle_t = other.middle();\n21 if (middle_s == middle_t) {\n22 return new Line(new Point(left, top), \n23 new Point(right, bottom));\n24 } else {\n25 return new Line(middle_s, middle_t);\n26 }\n27 }\n28 }\nThe main point of this problem is to see how careful you are about coding. It\u2019s easy to glance \nover the special cases (e.g., the two squares having the same middle). Make a list of these \nspecial cases \nbefore\n you start the problem and make sure to handle them appropriately.", "id": 146}, {"answer": "Design an algorithm to find the kth number such that the only prime factors are 3, \n5, and 7.", "question": "If we draw a line between every two points, we can check to see which line is the most com-mon. A brute force approach would be to simply iterate through each line segment (formed \nby pairs of points) and count how many points fall on it. \nThis would take O(N^3) time.\nBefore we discuss if we can do better, let\u2019s figure out how we can represent a line. A line can \nbe represented in (at least) two different ways: (1) as a pairing of points or (2) as a slope and \na y-intercept. \nBecause our line is infinite, the slope and y-intercept approach seems more appropriate. \nThe slope and y-intercept approach has an additional advantage: every line segment on the \nsame greater line will have identical slopes and y-intercepts. \nLet\u2019s re-think our solution. 
We have a bunch of line segments, represented as a slope and \ny-intercept, and we want to \nfind the most common slope and y-intercept. How can we find \nthe most common one?\nThis is really no different than the old \u201cfind the most common number in a list of numbers\u201d \nproblem. We just iterate through the lines segments and use a hash table to count the num-ber of times we\u2019ve seen each line.\n1 \n2 Line bestLine = null;\n3 HashMap<Line, Integer> line_count = new HashMap<Line, Integer>();\n4 for (int i = 0; i < points.length; i++) {\n5 for (int j = i + 1; j < points.length; j++) {\n6 Line line = new Line(points[i], points[j]);\n7 if (!line_count.containsKey(line)) {\n8 line_count.put(line, 0);\n9 }\n10 line_count.put(line, line_count.get(line) + 1); \n11 if (bestLine == null \n12 line_count.get(line) > line_count.get(bestLine)) {\n13 bestLine = line;\n14 }\n15 }\n16 } \n17 return bestLine;\n18 } \n19 \n20 public class Line {\n21 private static double epsilon = .0001;\n\n22 public double slope;\n23 public double intercept;\n24 \n\n25 public Line(GraphPoint p, GraphPoint q) {\n26 if (Math.abs(p.x - q.x) > epsilon) { // if x\u2019s are different\n27 slope = (p.y - q.y) / (p.x - q.x); // compute slope\n28 intercept = p.y - slope * p.x; // y intercept from y=mx+b\n29 } else {\n30 \n31 \n32 }\n33 }\n34 \n35 public boolean isEqual(double a, double b) {\n36 return (Math.abs(a - b) < epsilon);\n37 }\n38 \n39 @Override \n40 public int hashCode() {\n41 int sl = (int)(slope * 1000);\n42 int in = (int)(intercept * 1000);\n43 return sl \n44 } \n45 \n46 @Override \n47 public boolean equals(Object o) { \n48 Line l = (Line) o;\n49 if (isEqual(l.slope, slope) && isEqual(l.intercept, intercept) \n50 \n51 return true;\n52 }\n53 return false;\n54 } \n55 }\n \n\u00bb Be careful about the calculation of the slope of a line. The line might be completely \nvertical. We can keep track of this in a separate flag (infinite_slope). 
We need to check \nthis condition in the equals method.\n \n\u00bb Remember that when we perform division to calculate the slope, division is not exact. \nTherefore, rather than checking to see if two slopes are exactly equal, we need to check \nif they\u2019re different by greater than epsilon.", "id": 148}, {"answer": "If you were integrating a feed of end of day stock price information (open, high, low, \nand closing price) for 5,000 companies, how would you do it? You are responsible for \nthe development, rollout and ongoing monitoring and maintenance of the feed. De-scribe the different methods you considered and why you would recommend your \napproach. The feed is delivered once per trading day in a comma-separated format \nvia an FTP site. The feed will be used by 1000 daily users in a web application.", "question": "Any such number will look like (3^i)*(5^j)*(7^k). Here are the first 13 numbers:\n1-3^0 * 5^0 * 7 ^ 0\n3\n3\n3^1 * 5^0 * 7 ^ 0\n5\n5\n3^0 * 5^1 * 7 ^ 0\n7\n7\n3^0 * 5^0 * 7 ^ 1\n9\n3*3\n3^2 * 5^0 * 7 ^ 0\n15\n3*5\n3^1 * 5^1 * 7 ^ 0\n21\n3*7\n3^1 * 5^0 * 7 ^ 1\n25\n5*5\n3^0 * 5^2 * 7 ^ 0\n27\n3*9\n3^3 * 5^0 * 7 ^ 0\n35\n5*7\n3^0 * 5^1 * 7 ^1\n45\n5*9\n3^2 * 5^1 * 7 ^0\n49\n7*7\n3^0 * 5^0 * 7 ^2\n63\n3*21\n3^2 * 5^0 * 7 ^1\n \n\u00bb 3 * (previous number in list)\n \n\u00bb 5 * (previous number in list)\n \n\u00bb 7 * (previous number in list)\nHow would we find the next number in the list? Well, we could multiply 3, 5 and 7 times each \nnumber in the list and find the smallest element that has not yet been added to our list. This \nsolution is O(n^2). Not bad, but I think we can do better.\nIn our current algorithm, we\u2019re doing 3*1, 3*3, 3*5, 3*7, 3*9, 3*15, 3*21, 3*25 \u2013, and the same \nfor 5 and 7. We\u2019ve already done almost all this work before\u2014why are we doing it again?\nWe can fix this by multiplying each number we add to our list by 3, 5, 7 and putting the re-sults in one of the three first-in-first-out \nqueues. 
To look for the next \u201cmagic\u201d number, we pick \nthe smallest element in the three queues. Here is the algorithm:1. Initialize array magic and queues Q3, Q5 and Q72. Insert 1 into magic.3. Insert 1*3, 1*5 and 1*7 into Q3, Q5 and Q7 respectively.4. Let x be the minimum element in Q3, Q5 and Q7. Append x to magic.5. If x was found in: \n\n1 public static int getKthMagicNumber(int k) {\n2 if (k <= 0) return 0;\n3 int val = 1;\n4 Queue<Integer> Q3 = new LinkedList<Integer>();\n5 Queue<Integer> Q5 = new LinkedList<Integer>();\n6 Queue<Integer> Q7 = new LinkedList<Integer>();\n7 Q3.add(3);\n8 Q5.add(5);\n9 Q7.add(7);\n10 for (--k; k > 0; --k) { // We\u2019ve done one iteration already.\n11 val = Math.min(Q3.peek().intValue(), \n12 Math.min(Q5.peek().inValue(), Q7.peek().intValue()));\n13 if (val == Q7.peek()) {\n14 Q7.remove();\n15 } else {\n16 if (val == Q5.peek()) {\n17 Q5.remove();\n18 } else { // must be from Q3\n19 Q3.remove();\n20 Q3.add(val * 3);\n21 }\n22 Q5.add(val * 5);\n23 }\n24 Q7.add(val * 7);\n25 }\n26 return val;\n27 }\nWhen you get this question, do your best to solve it\u2014even though it\u2019s really di\u02ddcult. Explain \na brute force approach (not as tricky) and then start thinking about how you can optimize it. \nOr, try to find a pattern in the numbers.\nChances are, your interviewer will help you along when you get stuck. Whatever you do, \ndon\u2019t give up! Think out loud, wonder aloud, explain your thought process. Your interviewer \nwill probably jump in to guide you.", "id": 150}, {"answer": "\u00bb Very easy to distribute. This is one reason that XML is a standard data model to share /\ndistribute data.\n \n\u00bb E\u02ddcient parsers are available to parse the data and extract out only desired data. \n \n\u00bb We can add new data to the XML file by carefully appending data. 
We would not have \nto re-query the database.\nHowever, querying the data could be di\u02ddcult.", "question": "Let\u2019s assume we have some scripts which are scheduled to get the data via FTP at the end of \nthe day. Where do we store the data? How do we store\n the data in such a way that we can \ndo various analyses of it?\n\nKeep the data in text files. This would be very di\u02ddcult to manage and update, as well as very \nhard to query. Keeping unorganized text files would lead to a very ine\u02ddcient data model.\n\nWe could use a database. This provides the following benefits:\n \n\u00bb Logical storage of data.\n \n\u00bb Facilitates an easy way of doing query processing over the data. \nExample: return all stocks having open > N AND closing price < M\nAdvantages: \n \n\u00bb Makes the maintenance easy once installed properly.\n \n\u00bb Roll back, backing up data, and security could be provided using standard database \nfeatures. We don\u2019t have to \u201creinvent the wheel.\u201d\n\nIf requirements are not that broad and we just want to do a simple analysis and distribute the \ndata, then XML could be another good option.\nOur data has fixed format and fixed size: company_name, open, high, low, closing price. The \nXML could look like this:<root><date value=\u201c2008-10-12\u201d> <company name=\u201cfoo\u201d> <open>126.23</open> <high>130.27</high> <low>122.83</low>", "id": 152}, {"answer": "Approach:\n \nForget that we\u2019re dealing with millions of users at first. Design this for the simple case.\nWe can construct a \ngraph by assuming every person is a node and if there is an edge be-tween two nodes, then the two people are friends with each other.class Person { Person[] friends; // Other info}\nIf I want to find the connection between two people, I would start with one person and do a \nsimple breadth first search.\nBut... oh no! 
Millions of users!\nWhen we deal with a service the size of Orkut or Facebook, we cannot possibly keep all of our \ndata on one machine. That means that our simple Person data structure from above doesn\u2019t \nquite work\u2014our friends may not live on the same machine as us. Instead, we can replace our \nlist of friends with a list of their IDs, and traverse as follows:\n1. For each friend ID: int machine_index = lookupMachineForUserID(id);\n2. Go to machine machine_index\n3. Person friend = lookupFriend(machine_index);\nThere are more optimizations and follow up questions here than we could possibly discuss, \nbut here are just a few thoughts.\nOptimization: Reduce Machine Jumps\nJumping from one machine to another is expensive. Instead of randomly jumping from ma-chine to machine with each friend, try to batch these jumps\u2014e.g., if 5 of my friends live on \none machine, I should look them up all at once.\nOptimization: Smart Division of People and Machines\nPeople are much more likely to be friends with people who live in the same country as them. \nRather than randomly dividing people up across machines, try to divvy them up by country, \ncity, state, etc. This will reduce the number of jumps.\nQuestion: \nBreadth First Search usually requires \u201cmarking\u201d a node as visited. How do you do that in \n\nthis case?\nUsually, in BFS, we mark a node as visited by setting a flag visited in its node class. Here, we \ndon\u2019t want to do that (there could be multiple searches going on at the same time, so it\u2019s bad \nto just edit our data). In this case, we could mimic the marking of nodes with a hash \ntable to \nlookup a node id and whether or not it\u2019s been visited.\nOther Follow-Up Questions:\n \n\u00bb In the real world, servers fail. How does this affect you?\n \n\u00bb How could you take advantage of caching?\n \n\u00bb Do you search until the end of the graph (infinite)? 
How do you decide when to give up?\n \n\u00bb In real life, some people have more friends of friends than others, and are therefore \nmore likely to make a path between you and someone else. How could you use this data \nto pick where you start traversing?\nThe following code demonstrates our algorithm:\n1 public class Server {\n2 ArrayList<Machine> machines = new ArrayList<Machine>();\n3 }\n4 \n5 public class Machine {\n6 public ArrayList<Person> persons = new ArrayList<Person>();\n7 public int machineID;\n8 }\n9 \n10 public class Person {\n11 private ArrayList<Integer> friends;\n12 private int ID;\n13 private int machineID;\n14 private String info;\n15 private Server server = new Server();\n16 \n17 public String getInfo() { return info; }\n18 public void setInfo(String info) {\n19 this.info = info;\n20 }\n21 \n22 public int[] getFriends() {\n23 int[] temp = new int[friends.size()];\n24 for (int i = 0; i < temp.length; i++) {\n25 temp[i] = friends.get(i);\n26 }\n27 return temp;\n28 }\n\n29 public int getID() { return ID; }\n30 public int getMachineID() { return machineID; }\n31 public void addFriend(int id) { friends.add(id); }\n32 \n33 // Look up a person given their ID and Machine ID\n34 public Person lookUpFriend(int machineID, int ID) {\n35 for (Machine m : server.machines) {\n36 if (m.machineID == machineID) {\n37 for (Person p : m.persons) {\n38 if (p.ID == ID){\n39 return p; \n40 }\n41 }\n42 }\n43 }\n44 return null;\n45 }\n46 \n47 // Look up a machine given the machine ID\n48 public Machine lookUpMachine(int machineID) {\n49 for (Machine m:server.machines) {\n50 if (m.machineID == machineID)\n51 return m;\n52 }\n53 return null;\n54 }\n55 \n56 public Person(int iD, int machineID) {\n57 ID = iD;\n58 this.machineID = machineID;\n59 }\n60 }", "question": "How would you design the data \nstructures for a very large social network (Facebook, \nLinkedIn, etc)? 
Describe how you would design an algorithm to show the connec-tion, or path, between two people (e.g., Me -> Bob -> Susan -> Jason -> You).", "id": 154}, {"answer": "There are a total of 2^32, or 4 billion, distinct integers possible. We have 1 GB of memory, or \n8 billion bits.\nThus, with 8 billion bits, we can map all possible integers to a distinct bit with the available \nmemory. The logic is as follows:\n1. Create a bit vector (BV) of size 4 billion.\n2. Initialize BV with all 0\u2019s\n3. Scan all numbers (num) from the file and write BV[num] = 1;\n4. Now scan again BV from 0th index\n5. Return the first index which has 0 value.\n1 byte[] \n\n2 \n3 \n\n4 while (in.hasNextInt()) {\n5 int n = in.nextInt ();\n6 \n7 * OR operator to set the nth bit of a byte (e.g.. 10 would\n8 * correspond to the 2nd bit of index 2 in the byte array). */\n9 \n10 }\n11 \n12 \n\n13 for (int j = 0; j < 8; j++) {\n14 /* Retrieves the individual bits of each byte. When 0 bit\n15 \n16 \n17 System.out.println (i * 8 + j);\n18 return;\n19 }\n20 }\n21 } \n22 }\n\nFollow Up: What if we have only 10 MB memory?\nIt\u2019s possible to find a missing integer with just two passes of the data set. We can divide up \nthe integers into blocks of some size (we\u2019ll discuss how to decide on a size later). Let\u2019s just as-sume that we divide up the integers into blocks of 1000. So, block 0 represents the numbers \n0 through 999, block 1 represents blocks 1000 - 1999, etc. Since the range of ints is finite, we \nknow that the number of blocks needed is finite.\nIn the first pass, we count how many ints are in each block. That is, if we see 552, we know \nthat that is in block 0, we increment counter[0]. If we see 1425, we know that that is in block \n1, so we increment counter[1].\nAt the end of the first pass, we\u2019ll be able to quickly spot a block that is missing a number. If \nour block size is 1000, then any block which has fewer than 1000 numbers must be missing a \nnumber. 
Pick any one of those blocks.\nIn the second pass, we\u2019ll actually look for which number is missing. We can do this by creat-ing a simple bit vector of size 1000. We iterate through the file, and for each number that \nshould be in our block, we set the appropriate bit in the bit vector. By the end, we\u2019ll know \nwhich number (or numbers) is missing.\nNow we just have to decide what the block size is.\nA quick answer is 2^20 values per block. We will need an array with 2^12 block counters and \na bit vector in 2^17 bytes. Both of these can comfortably fit in 10*2^20 bytes.\nWhat\u2019s the smallest footprint? When the array of block counters occupies the same memory \nas the bit vector. Let N = 2^32.counters (bytes): blocks * 4bit vector (bytes): (N / blocks) / 8blocks * 4 = (N / blocks) / 8blocks^2 = N / 32blocks = sqrt(N/2)/4\nIt\u2019s possible to find a missing integer with just under 65KB (or, more exactly, sqrt(2)*2^15 \nbytes).\n1 int bitsize = 1048576; // 2^20 bits (2^17 bytes)\n2 int blockNum = 4096; // 2^12\n3 \n4 int[] blocks = new int[blockNum];\n5 \n6 \n7 int starting = -1;\n8", "question": "Given an input file with four billion integers, provide an algorithm to generate an \ninteger which is not contained in the file. Assume you have 1 GB of memory.\nFOLLOW UP\n \nWhat if you have only 10 MB of memory?", "id": 156}, {"answer": "30 }\n31 }\n32 \n33 \n\n34 for (int j = 0; j < 8; j++) {\n35 /* Retrieves the individual bits of each byte. When 0 bit \n36 \n37 \n38 System.out.println(i * 8 + j + starting);\n39 return;\n40 }\n41 }\n42 } \n43 }", "question": "9 while (in.hasNextInt()) {\n10 int n = in.nextInt();\n11 \n\n12 }\n13 \n14 for (int i = 0; i < blocks.length; i++) {\n15 \n16 /* if value < 2^20, then at least 1 number is missing in\n17 * that section. 
*/\n18 \n19 break;\n20 }\n21 }\n22 \n23 \n\n24 while (in.hasNextInt()) {\n25 int n = in.nextInt();\n26 /* If the number is inside the block that\u2019s missing numbers,\n27 * we record it */\n28 \n\n29", "id": 158}, {"answer": "We have 4KB of memory which means we can address up to 8 * 4 * (2^10) bits. Note that 32* \n(2^10) bits is greater than 32000. We can create a bit vector with 32000 bits, where each bit \nrepresents one integer. \nNOTE: While this isn\u2019t an especially di\u02ddcult problem, it\u2019s important to implement this cleanly. \nWe will define our own bit vector class to hold a large bit vector.\n1 public static void checkDuplicates(int[] array) {\n2 BitSet bs = new BitSet(32000);\n3 for (int i = 0; i < array.length; i++) {\n4 int num = array[i];\n5 int num0 = num - 1; // bitset starts at 0, numbers start at 1\n6 if (bs.get(num0)) { \n7 System.out.println(num);\n8 } else {\n9 bs.set(num0); \n10 }\n11 }\n12 }\n13 \n14 class BitSet {\n15 int[] bitset;\n16 \n17 public BitSet(int size) {\n18 bitset = new int[size >> 5]; // divide by 32\n19 }\n20 \n21 boolean get(int pos) {\n22 int wordNumber = (pos >> 5); // divide by 32\n23 int bitNumber = (pos & 0x1F); // mod 32\n24 return (bitset[wordNumber] & (1 << bitNumber)) != 0;\n25 }\n26 \n27 void set(int pos) {\n28 int wordNumber = (pos >> 5); // divide by 32\n29 int bitNumber = (pos & 0x1F); // mod 32\n30 bitset[wordNumber] \n31 }\n32 }", "question": "You have an array with all the numbers from 1 to N, where N is at most 32,000. The \narray may have duplicate entries and you do not know what N is. With only 4KB of \nmemory available, how would you print all duplicate elements in the array?", "id": 160}, {"answer": "First, how does the crawler get into a loop? The answer is very simple: when we re-parse an \nalready parsed page. 
This would mean that we revisit all the links found in that page, and this \nwould continue in a circular fashion.\nBe careful about what the interviewer considers the \u201csame\u201d page. Is it URL or content? One \ncould easily get redirected to a previously crawled page.\nSo how do we stop visiting an already visited page? The web is a graph-based structure, \nand we commonly use DFS (depth first search) and BFS (breadth first search) for traversing \ngraphs. We can mark already visited pages the same way that we would in a BFS/DFS.\nWe can easily prove that this \nalgorithm will terminate in any case. We know that each step \nof the algorithm will parse only new pages, not already visited pages. So, if we assume that \nwe have N number of unvisited pages, then at every step we are reducing N (N-1) by 1. That \nproves that our algorithm will continue until they are only N steps.\n \n\u00bb This question has a lot of ambiguity. Ask clarifying questions!\n \n\u00bb Be prepared to answer questions about coverage.\n \n\u00bb What kind of pages will you hit with a DFS versus a BFS?\n \n\u00bb What will you do when your crawler runs into a honey pot that generates an infinite \nsubgraph for you to wander about?", "question": "If you were designing a web crawler, how would you avoid getting into infinite loops?", "id": 162}, {"answer": "Observations:\n1. Pages are huge, so bringing all of them in memory is a costly affair. We need a shorter \nrepresentation of pages in memory. A hash is an obvious choice for this.\n2. Billions of urls exist so we don\u2019t want to compare every page with every other page \n(that would be \nO(n^2)).\nBased on the above two observations we can derive an algorithm which is as follows:\n1. Iterate through the pages and compute the hash table of each one.\n2. Check if the hash value is in the hash table. If it is, throw out the url as a duplicate. 
If it \nis not, then keep the url and insert it in into the hash table.\nThis algorithm will provide us a list of unique urls. But wait, can this fit on one computer?\n \n\u00bb How much space does each page take up in the hash table?\n \n\u00bb Each page hashes to a four byte value.\n \n\u00bb Each url is an average of 30 characters, so that\u2019s another 30 bytes at least.\n \n\u00bb Each url takes up roughly 34 bytes.\n \n\u00bb 34 bytes * 1 billion = 31.6 gigabytes. We\u2019re going to have trouble holding that all in \nmemory!\nWhat do we do?\n \n\u00bb We could split this up into files. We\u2019ll have to deal with the file loading / unloading\u2014ugh.\n \n\u00bb We could hash to disk. Size wouldn\u2019t be a problem, but access time might. A hash table \non disk would require a random access read for each check and write to store a viewed \nurl. This could take msecs waiting for seek and rotational latencies. Elevator algorithms \ncould elimate random bouncing from track to track.\n \n\u00bb Or, we could split this up across machines, and deal with network latency. Let\u2019s go with \nthis solution, and assume we have n machines.\n \n\u00bb First, we hash the document to get a hash value v\n \n\u00bb v%n tells us which machine this document\u2019s hash table can be found on.\n \n\u00bb v / n is the value in the hash table that is located on its machine.", "question": "You have a billion urls, where each is a huge page. How do you detect the duplicate \ndocuments?", "id": 164}, {"answer": "Construct an index for each field that requires range queries. Use a B+ tree to implement \nthe index. A B+ \ntree organizes sorted data for e\u02ddcient insertion, retrieval and removal of \nrecords. Each record is identified by a key (for this problem, it is the field value). Since it is a \ndynamic, multilevel index, finding the beginning of the range depends only on the height \nof the tree, which is usually quite small. 
Record references are stored in the leaves, sorted by \nthe key. \nAdditional records can be found by following a next block reference. Records will be \nsequentially available until the key value reaches the maximum value specified in the query. \nThus, runtimes will be dominated by the number of elements in a range.\nAvoid using trees that store data at interior nodes, as traversing the tree will be expensive \nsince it won\u2019t be resident in memory.", "question": "You have to design a database that can store terabytes of data. It should support ef-ficient range queries. How would you do it?", "id": 166}, {"answer": "The printf will never get executed, as \u201ci\u201d is initialized to 100, so condition check \u201ci <= 0\u201d will fail.\nSuppose the code is changed to \u201ci >= 0.\u201d Then, it will become an infinite loop, because \u201ci\u201d is an \nunsigned int which can\u2019t be negative.\nThe correct code to print all numbers from 100 to 1, is \u201ci > 0\u201d.\n1 unsigned int i; \n2 for (i = 100; i > 0; --i) \n3 \n\nOne additional correction is to use %u in place of %d, as we are printing unsigned int.\n1 unsigned int i; \n2 for (i = 100; i > 0; --i) \n3 printf(\u201c%u", "question": "Find the mistake(s) in the following code:\n 1 unsigned int i;2 for (i = 100; i <= 0; --i)3", "id": 168}, {"answer": "The question largely depends on the type of application being diagnosed. However, we can \ngive some general causes of random crashes.\n1. Random variable: The application uses some random number or variable \ncomponent \nwhich may not be fixed for every execution of the program. Examples include: user \ninput, a random number generated by the program, or the time of day.\n2. Memory Leak: The program may have run out of memory. Other culprits are totally \nrandom for each run since it depends on the number of processes running at that \nparticular time. 
This also includes heap overflow or corruption of data on the stack.\nIt is also possible that the program depends on another application / external module that \ncould lead to the crash. If our application, for example, depends on some system attributes \nand they are modified by another program, then this interference may lead to a crash. Pro-grams which interact with hardware are more prone to these errors.\nIn an interview, we should ask about which kind of application is being run. This information \nmay give you some idea about the kind of error the interviewer is looking for. For example, \na web server is more prone to memory leakage, whereas a program that runs close to the \nsystem level is more prone to crashes due to system dependencies.", "question": "You are given the source to an application which crashes when it is run. After running \nit ten times in a debugger, you find it never crashes in the same place. The application \nis single threaded, and uses only the C standard library. What programming errors \ncould be causing this crash? How would you test each one?", "id": 170}, {"answer": "There are two primary types of testing we should\n do:\nValidation of input/output:\n \nWe should validate both the input and output to make sure that each are valid. This might \nentail:\n1. Checking whether input is within the board limit.\n \n\u00bb Attempt to pass in negative numbers\n \n\u00bb Attempt to pass in x which is larger than the width\n \n\u00bb Attempt to pass in y which is larger than the width\nDepending on the implementation, these should either return false or throw an excep-tion.\n2. Checking if output is within the valid set of return values. (Not an issue in this case, \nsince there are no \u201cinvalid\u201d boolean values.)\nFunctional testing:\n \nIdeally, we would like to test every possible board, but this is far too big. We can do a reason-able coverage of boards however. 
There are 6 pieces in chess, so we need to do something \nlike this:\n1 foreach piece a:\n2 for each other type of piece b (6 types + empty space)\n3 foreach direction d\n4 Create a board with piece a. \n5 Place piece b in direction d.\n6 Try to move \u2013 check return value.", "question": "We have the following method used in a chess game: boolean canMoveTo(int x, int y) \nx and y are the coordinates of the chess board and it returns whether or not the piece \ncan move to that position. Explain how you would test this method.", "id": 172}, {"answer": "Load testing helps to identify a web application\u2019s maximum operating capacity, as well as \nany bottlenecks that may interfere with its performance. Similarly, it can check how an ap-plication responds to variations in load.\nTo perform load testing, we must first identify the performance-critical scenarios and the \nmetrics which fulfill our performance objectives. Typical criteria include:\n \n\u00bb response time\n \n\u00bb throughput\n \n\u00bb resource utilization\n \n\u00bb maximum load that the system can bear.\nThen, we design tests to simulate the load, taking care to measure each of these criteria.\nIn the absence of formal testing tools, we can basically create our own. For example, we \ncould simulate concurrent users by creating thousands of virtual users. We would write a \nmulti-threaded program with thousands of threads, where each thread acts as a real-world \nuser loading the page. For each user, we would programmatically measure response time, \ndata I/O, etc.\nWe would then analyze the results based on the data gathered during the tests and compare \nit with the accepted values.", "question": "How would you load test a webpage without using any test tools?", "id": 174}, {"answer": "This problem is largely about understand the constraints: what exactly is the pen? You \nshould ask a lot of questions to understand what exactly you are trying to test. 
To illustrate \nthe technique in this problem, let us guide you through a mock-conversation.\n\n How would you test a pen?\n\nLet me find out a bit about the pen. Who is going to use the pen?\n\nProbably children.\n\nOk, that\u2019s interesting. What will they be doing with it? Will they be writing, draw-ing, or doing something else with it?\n\nDrawing.\n\nOk, great. On what? Paper? Clothing? Walls?\n\nOn clothing.\n\nGreat. What kind of tip does the pen have? Felt? Ball point? Is it intended to \nwash off, or is it intended to be permanent?\n\nIt\u2019s intended to wash off.\n\u2013. many questions later ...\n\nOk, so as I understand it, we have a pen that is being targeted at 5\u201410 year olds. \nThe pen has a felt tip and comes in red, green, blue and black. It\u2019s intended to wash off cloth-ing. Is that correct?\n\u2013\nThe candidate now has a problem that is significantly different from what it initially seemed \nto be. Thus, the candidate might now want to test:\n1. Does the pen wash off with warm water, cold water, and luke warm water?\n2. Does the pen wash off after staying on the clothing for several weeks? What happens if \nyou wash the clothing while the pen is still wet?\n3. Is the pen safe (e.g.\u2014non-toxic) for children?\nand so on...", "question": "How would you test a pen?", "id": 176}, {"answer": "The first thing to \ndo on this question is to clarify assumptions. Ask the following questions:\n \n\u00bb Who is going to use the ATM? Answers might be \u201canyone,\u201d or it might be \u201cblind people\u201d - or any number of other answers.\n \n\u00bb What are they going to use it for? Answers might be \u201cwithdrawing money,\u201d \u201ctransferring \nmoney,\u201d \u201cchecking their balance,\u201d or many other answers.\n \n\u00bb What tools do we have to test? 
Do we have access to the code, or just the ATM machine?\nRemember: a good tester makes sure she knows what she\u2019s testing!\nHere are a few test cases for how to test just the withdrawing functionality:\n \n\u00bb Withdrawing money less than the account balance\n \n\u00bb Withdrawing money greater than the account balance\n \n\u00bb Withdrawing money equal to the account balance\n \n\u00bb Withdrawing money from an ATM and from the internet at the same time\n \n\u00bb Withdrawing money when the connection to the bank\u2019s network is lost\n \n\u00bb Withdrawing money from multiple ATMs simultaneously", "question": "How would you test an ATM in a distributed banking system?", "id": 178}, {"answer": "One brute force way could be to \ncount the number of lines (N) and then print from N-10 to \nNth line. But, this requires two reads of the file \u2013 potentially very costly if the file is large.\nWe need a solution which allows us to read just once and be able to print the last K lines. We \ncan create extra space for K lines and then store each set of K lines in the array. So, initially, \nour array has lines 0 through 9, then 1 through 10, then 2 through 11, etc (if K = 10). Each \ntime that we read a new line, we purge the oldest line from the array. Instead of shifting the \narray each time (very ine\u02ddcient), we will use a circular array. This will allow us to always find \nthe oldest element in O(1) time.\nExample of inserting elements into a circular array: step 1 (initially): array = {a, b, c, d, e, f}. p = 0step 2 (insert g): array = {g, b, c, d, e, f}. p = 1step 3 (insert h): array = {g, h, c, d, e, f}. p = 2step 4 (insert i): array = {g, h, i, d, e, f}. 
p = 3\nCode:\n1 string L[K];\n2 int lines = 0; \n3 \n4 \n\n5 ++lines; \n6 } \n7 // if less than K lines were read, print them all \n8 int start, count;\n9 if (lines < K) {\n10 start = 0;\n11 count = lines;\n12 } else {\n13 start = lines % K;\n14 count = K;\n15 }\n16 for (int i = 0; i < count; ++i) { \n17 cout << L[(start + i) % K] << endl; \n18 }\n \n\u00bb Note, if you do printf(L[(index + i) % K]) when there are %\u2019s in the string, bad things will \nhappen.", "question": "Write a method to print the last K lines of an input file using C++.", "id": 180}, {"answer": "Compare and\n contrast Hash Table vs. STL map\nIn a hash table, a value is stored by applying hash function on a key. Thus, values are not \nstored in a hash table in sorted order. Additionally, since hash tables use the key to find the \nindex that will store the value, an insert/lookup can be done in amortised O(1) time (assum-ing only a few collisions in the hashtable). One must also handle potential collisions in a \nhashtable.\nIn an STL map, insertion of key/value pair is in sorted order of key. It uses a \ntree to store \nvalues, which is why an \nO(log N) insert/lookup is required. There is also no need to handle \ncollisions. An STL map works well for things like:\n \n\u00bb find min element\n \n\u00bb find max element\n \n\u00bb print elements in sorted order\n \n\u00bb find the exact element or, if the element is not found, find the next smallest number\nHow is a hash table implemented?\n1. A good hash function is required (e.g.: operation % prime number) to ensure that the \nhash values are uniformly distributed.\n2. A collision resolving method is also needed: chaining (good for dense table entries), \nprobing (good for sparse table entries), etc.\n3. Implement methods to dynamically increase or decrease the hash table size on a given \ncriterion. 
For example, when the [number of elements] by [table size] ratio is greater \nthan the fixed threshold, increase the hash table size by creating a new hash table and \ntransfer the entries from the old table to the new table by computing the index using \nnew hash function.\nWhat can be used instead of a hash table, if the number of inputs is small?\nYou can use an STL map. Although this takes O(log n) time, since the number of inputs is \nsmall, this time is negligible.", "question": "Compare and contrast a \nhash table vs. an STL map. How is a hash table implemented? \nIf the number of inputs is small, what data structure options can be used instead of \na hash table?", "id": 182}, {"answer": "A virtual function depends on a \u201cvtable\u201d or \u201cVirtual Table\u201d. If any function of a class is declared \nas virtual, a v-table is \nconstructed which stores addresses of the virtual functions of this class. \nThe compiler also adds a hidden vptr variable in all such classes which points to the vtable of \nthat class. If a virtual function is not overridden in the derived class, the vtable of the derived \nclass stores the address of the function in his parent class. The v-table is used to resolve the \naddress of the function, for whenever the virtual function is called. Dynamic binding in C++ \nis therefore performed through the vtable mechanism.\nThus, when we assign the derived class object to the base class pointer, the vptr points to the \nvtable of the derived class. 
This assignment ensures that the most derived virtual function \ngets called.\n1 class Shape {\n2 public:\n3 int edge_length;\n4 virtual int circumference () {\n5 \n6 return 0;\n7 }\n8 };\n9 class Triangle: public Shape {\n10 public:\n11 int circumference () {\n12 \n13 return 3 * edge_length;\n14 }\n15 };\n16 void main() {\n17 Shape * x = new Shape();\n18 x->circumference(); // prints \u201cCircumference of Base Class\u201d\n19 Shape *y = new Triangle();\n20 y->circumference(); // prints \u201cCircumference of Triangle Class\u201d\n21 }\nIn the above example, circumference is a virtual function in shape class, so it becomes vir-tual in each of the derived classes (triangle, rectangle). C++ non-virtual function calls are \nresolved at compile time with static binding, while virtual function calls are resolved at run \ntime with dynamic binding.", "question": "How do virtual functions work in C++?", "id": 184}, {"answer": "1 struct Test {\n2 char * ptr;\n3 };\n4 void shallow_copy(Test & src, Test & dest) {\n5 dest.ptr = src.ptr;\n6 }\n7 void deep_copy(Test & src, Test & dest) {\n8 dest.ptr = malloc(strlen(src.ptr) + 1);\n9 memcpy(dest.ptr, src.ptr);\n10 }\nNote that shallow_copy may cause a lot of programming run-time errors, especially with the \ncreation and deletion of objects. Shallow copy should be used very carefully and only when \na programmer really understands what he wants to do. In most cases shallow copy is used \nwhen there is a need to pass information about a complex structure without actual duplica-tion of data (e.g., call by reference). One must also be careful with destruction of shallow \ncopy. \nIn real life, shallow copy is rarely used. 
There is an important programming concept called \n\u201csmart pointer\u201d that, in some sense, is an enhancement of the shallow copy concept.\nDeep copy should be used in most cases, especially when the size of the copied structure is \nsmall.", "question": "What is the difference between deep copy and shallow copy? Explain how you would \nuse each.", "id": 186}, {"answer": "Volatile \ninforms the compiler that the value of the variable can change from the outside, \nwithout any update done by the code.\nDeclaring a simple volatile variable:volatile int x;int volatile x;\nDeclaring a pointer variable for a volatile memory (only the pointer address is volatile):volatile int * x;int volatile * x;\nDeclaring a volatile pointer variable for a non-volatile memory (only memory contained is \nvolatile):int * volatile x;\nDeclaring a volatile variable pointer for a volatile memory (both pointer address and memo-ry contained are volatile):volatile int * volatile x;int volatile * volatile x;\nVolatile variables are not optimized, but this can actually be useful. Imagine this function:\n1 int opt = 1; \n2 void Fn(void) {\n3 start:\n4 if (opt == 1) goto start;\n5 else break;\n6 }\nAt first glance, our code appears to loop infinitely. The compiler will try to optimize it to: \n1 void Fn(void) {\n2 start:\n3 int opt = 1;\n4 if (true)\n5 goto start;\n6 }\nThis becomes an infinite loop. However, an external program might write \u201a0\u2019 to the location \nof variable opt. Volatile variables are also useful when multi-threaded programs have global \nvariables and any thread can modify these shared variables. Of course, we don\u2019t want opti-mization on them.", "question": "What is the significance of the keyword \u201cvolatile\u201d in C?", "id": 188}, {"answer": "Let us explain \nthrough an example. 
In C++, when you have a class with an overloaded meth-od, and you then extend and override that method, you must override all of the overloaded \nmethods.\nFor example:\n1 class FirstClass {\n2 public:\n3 virtual void MethodA (int);\n4 virtual void MethodA (int, int);\n5 };\n6 void FirstClass::MethodA (int i) { \n7 \n\n8 }\n9 void FirstClass::MethodA (int i, int j) { \n10 \n\n11 }\nThis is a simple class with two methods (or one overloaded method). If you want to override \nthe one-parameter version, you can do the following:\n1 class SecondClass : public FirstClass {\n2 public:\n3 void MethodA (int);\n4 };\n5 void SecondClass::MethodA (int i) { \n6 \n\n7 }\n8 void main () {\n9 SecondClass a;\n10 a.MethodA (1);\n11 a.MethodA (1, 1);\n12 }\nHowever, the second call won\u2019t work, since the two-parameter MethodA is not visible. That \nis name hiding.", "question": "What is name hiding in C++?", "id": 190}, {"answer": "Calling a method with an object \npointer always invokes:\n \n\u00bb the most derived class function, if a method is virtual.\n \n\u00bb the function implementation corresponding to the object pointer type (used to call the \nmethod), if a method is non-virtual.\nA virtual destructor works in the same way. A destructor gets called when an object goes out \nof scope or when we call delete on an object pointer.\nWhen any derived class object goes out of scope, the destructor of that derived class gets \ncalled first. It then calls its parent class destructor so memory allocated to the object is prop-erly released.\nBut, if we call delete on a base pointer which points to a derived class object, the base class \ndestructor gets called first (for non-virtual function). 
For example:\n1 class Base { \n2 public: \n3 Base() { cout << \u201cBase Constructor \u201c << endl; } \n4 ~Base() { cout << \u201cBase Destructor \u201c << endl; } /* see below */\n5 }; \n6 class Derived: public Base { \n7 public: \n8 Derived() { cout << \u201dDerived Constructor \u201c << endl; } \n9 ~Derived() { cout << \u201dDerived Destructor \u201c << endl; } \n10 }; \n11 void main() { \n12 Base *p = new Derived(); \n13 delete p; \n14 }\nBase ConstructorDerived ConstructorBase Destructor\nIf we declare the base class destructor as virtual, this makes all the derived class destructors \nvirtual as well.\nIf we replace the above destructor with:\n1 virtual ~Base() {\n2 cout << \u201cBase Destructor\u201d << endl;\n3 }\n\nThen the output becomes:Base ConstructorDerived ConstructorDerived DestructorBase Destructor\nSo we should use virtual destructors if we call delete on a base class pointer which points to \na derived class.", "question": "Why does a \ndestructor in base class need to be declared virtual?", "id": 192}, {"answer": "The algorithm will maintain a mapping from a node address in the original structure to the \ncorresponding \nnode in the new structure. This mapping will allow us to discover previously \ncopied nodes during a traditional depth first traversal of the structure. (Traversals often mark \nvisited nodes--the mark can take many forms and does not necessarily need to be stored in \nthe node.) 
Thus, we have a simple recursive \nalgorithm:\n1 typedef map<Node*, Node*> NodeMap;\n2 \n3 Node * copy_recursive(Node * cur, NodeMap & nodeMap) {\n4 if(cur == NULL) {\n5 return NULL;\n6 }\n7 \n\n8 if (i != nodeMap.end()) {\n9 // we\u2019ve been here before, return the copy\n10 return i->second;\n11 }\n12 Node * node = new Node;\n13 nodeMap[cur] = node; // map current node before traversing links\n14 node->ptr1 = copy_recursive(cur->ptr1, nodeMap);\n15 node->ptr2 = copy_recursive(cur->ptr2, nodeMap);\n16 return node;\n17 }\n18 Node * copy_structure(Node * root) {\n19 NodeMap nodeMap; // we will need an empty map\n20 return copy_recursive(root, nodeMap);\n21 }\n22", "question": "Write a method that takes a pointer to a Node structure as a parameter and returns \na complete copy of the passed-in data structure. The Node structure contains two \npointers to other Node structures.", "id": 194}, {"answer": "Smart_ptr is the same as a normal pointer, but it provides safety via automatic memory. It \navoids dangling pointers, memory leaks, allocation failures etc. 
The smart pointer must main-tain a single reference count for all instances.\n1 template <class T> class SmartPointer {\n2 public:\n3 SmartPointer(T * ptr) {\n4 ref = ptr;\n5 ref_count = (unsigned*)malloc(sizeof(unsigned));\n6 *ref_count = 1;\n7 }\n8 SmartPointer(SmartPointer<T> & sptr) {\n9 ref = sptr.ref;\n10 ref_count = sptr.ref_count;\n11 ++*ref_count;\n12 }\n13 SmartPointer<T> & operator=(SmartPointer<T> & sptr) {\n14 if (this != &sptr) {\n15 ref = sptr.ref;\n16 ref_count = sptr.ref_count;\n17 ++*ref_count;\n18 }\n19 return *this;\n20 }\n21 ~SmartPointer() {\n22 --*ref_count;\n23 if (*ref_count == 0) {\n24 delete ref;\n25 free(ref_count);\n26 ref = NULL;\n27 ref_count = NULL;\n28 }\n29 }\n30 T getValue() { return *ref; }\n31 protected: \n32 T * ref;\n33 unsigned * ref_count;\n34 };", "question": "Write a smart pointer (smart_ptr) class.", "id": 196}, {"answer": "Declaring the constructor private will ensure that no one outside of the class can directly in-stantiate the class. In this case, the only way \nto create an instance of the class is by providing \na static public method, as is done when using the Factory Method Pattern.\nAdditionally, because the constructor is private, the class also cannot be inherited.", "question": "In terms of inheritance, what is the effect of keeping a \nconstructor private?", "id": 198}, {"answer": "Yes, it will get executed.\nThe finally block gets executed when the try block exists. 
However, even when we attempt \nto exit within the try block (normal exit, \nreturn, continue, break or any exception), the finally \nblock will still be executed.\nNote: There are some cases in which the flnally block will not get executed: if the \nvirtual machine exits in between try/catch block execution, or the thread which \nis executing try/catch block gets killed.", "question": "In Java, does the finally block gets executed if we insert a return statement inside the \ntry block of a try-catch-finally?", "id": 200}, {"answer": "Final\n \nWhen applied to a variable (\nprimitive): The value of the variable cannot change. \nWhen applied to a variable (reference): The reference variable cannot point to any other ob-ject on the heap. \nWhen applied to a method: The method cannot be overridden. \nWhen applied to a class: The class cannot be subclassed. \nFinally \nThere is an optional finally block after the try block or after the catch block. Statements in the \nfinally block will always be executed (except if JVM exits from the try block). The finally block \nis used to write the clean up code. 
\nFinalize \nThis is the method that the JVM runs before running the garbage collector.", "question": "What is the difference between final, finally, and finalize?", "id": 202}, {"answer": "Classes and functions can be templated.\nClasses and \nmethods can be genericized.\nParameters can be any type or integral \nvalue.\nParameters can only be reference types \n(not primitive types).\nSeparate copies of the class or function are \nlikely to be generated for each type param-eter when compiled.\nOne version of the class or function is com-piled, works for all type parameters.\nObjects of a class with different type pa-rameters are different types at run time.\nType parameters are erased when com-piled; objects of a class with different type \nparameters are the same type at run time.\nImplementation source code of the tem-plated class or function must be included \nin order to use it (declaration insu\u02ddcient).\nSignature of the class or function from a \ncompiled class file is su\u02ddcient to use it.\nTemplates can be specialized - a separate \nimplementation could be provided for a \nparticular template parameter.\nGenerics cannot be specialized.\nDoes not support wildcards. 
Instead, re-turn types are often available as nested \ntypedefs.\nSupports wildcard as type parameter if it is \nonly used once.\nDoes not directly support bounding of \ntype parameters, but metaprogramming \nprovides this.\nSupports bounding of type parameters \nwith \"extends\" and \"super\" for upper and \nlower bounds, respectively; allows enforce-ment of relationships between type param-eters.\nAllows instantiation of class of type param-eter type.\nDoes not allow instantiation of class of type \nparameter type.\nType parameter of templated class can be \nused for static methods and variables.\nType parameter of templated class cannot \nbe used for static methods and variables.\nStatic variables are not shared between \nclasses of different type parameters.\nStatic variables are shared between instanc-es of a classes of different type parameters.From http://en.wikipedia.org/wiki/Comparison_of_Java_and_C%2B%2B#Templates_vs._Generics", "question": "Explain the difference between templates in C++ and generics in Java.", "id": 204}, {"answer": "Object Reflection is a \nfeature in Java which provides a way to get reflective information about \nJava classes and objects, such as:\n1. Getting information about methods and fields present inside the class at run time.\n2. Creating a new instance of a class.\n3. Getting and setting the object fields directly by getting field reference, regardless of \nwhat the access modifier is.\n1 \n2 \n3 public class Sample {\n4 public static void main(String args[]) {\n5 try {\n6 Class c = Class.forName(\u201cjava.sql.Connection\u201d);\n7 Method m[] = c.getDeclaredMethods();\n8 for (int i = 0; i < 3; i++) {\n9 System.out.println(m[i].toString());\n10 }\n11 } catch (Throwable e) {\n12 System.err.println(e);\n13 }\n14 }\n15 }\nThis code\u2019s output is the names of the first 3 methods inside the \u201cjava.sql.Connection\u201d class \n(with fully qualified parameters).\nWhy it is useful:\n1. 
Helps in observing or manipulating the runtime behavior of applications.\n2. Useful while debugging and testing applications, as it allows direct access to methods, \nconstructors, fields, etc.", "question": "Explain what object reflection is in Java and why it is useful.", "id": 206}, {"answer": "One simple solution \nis to put count variables for get() and put() methods and, whenever they \nare called, increment the count. We can also achieve this by extending the existing library \nmap and overriding the get() and put() functions.\nAt first glance, this seems to work. However, what if we created multiple instances of the \nmap? How would you sum up the total count for each map object?\nThe simplest solution for this is to keep the count variables static. We know static variables \nhave only one copy for all objects of the class so the total count would be reflected in count \nvariables.", "question": "Suppose you are using a \nmap in your program, how would you count the number of \ntimes the program calls the put() and get() functions?", "id": 208}, {"answer": "This problem uses a straight-forward join of Departments and Employees. Note that we use a \nleft join instead of an inner join \nbecause we want to include Departments with 0 employees.\n1 select Dept_Name, Departments.Dept_ID, count(*) as \u201anum_employees\u2019\n2 from Departments \n3 left join Employees\n4 on Employees.Dept_ID = Departments.Dept_ID \n5 group by Departments.Dept_ID, Dept_Name", "question": "Write a method to find the number of employees in each department.", "id": 210}, {"answer": "JOIN is used to combine the \nresults of two tables. To perform a join, each of the tables must \nhave at least one field which will be used to find matching records from the other table. The \njoin type defines which records will go into the result set. \nLet\u2019s take for example two tables: one table lists \u201cregular\u201d beverages, and another lists the \ncalorie-free beverages. 
Each table has two fields: the beverage name and its product code. \nThe \u201ccode\u201d field will be used to perform the record matching. \nRegular Beverages:", "question": "What are the different types of joins? Please explain how they differ and why certain \ntypes are better in certain situations.", "id": 212}, {"answer": "COCACOLA\nDiet Coca-Cola\nFRESCA\nFresca\nPEPSI\nDiet Pepsi\nPEPSI\nPepsi Light\nWater\nPurified Water\nLet\u2019s join this table by the code field. Whereas the order of the joined tables makes sense in \nsome cases, we will consider the following statement:[Beverage] JOIN [Calorie-Free Beverage]\ni.e. [Beverage] is from the left of the join operator, and [Calorie-Free Beverage] is from the \nright.", "question": "Budweiser\nBUDWEISER\nCoca-Cola\nCOCACOLA\nPepsi\nPEPSI\nCalorie-Free Beverages:", "id": 214}, {"answer": "OUTER JOIN will always contain the results of INNER JOIN, however it can \ncontain some records that have no matching record in other table. OUTER JOINs are divided \nto following subtypes:", "question": "Result set will contain only those data where the criteria match. In our ex-ample we will get 3 records: 1 with COCACOLA and 2 with PEPSI codes.", "id": 216}, {"answer": ", or simply \n\n: This type of join is the opposite of LEFT \nJOIN; it will contain all records from the right table, and missing fields from the left table will \ncontain NULL. If we have two tables A and B, then we can say that statement A LEFT JOIN B \nis equivalent to statement B RIGHT JOIN A.\nIn our example, we will get 5 records. In addition to INNER JOIN results, FRESCA and WATER \nrecords will be listed.", "question": ", or simply \n\n: The result will contain all records from the left \ntable. If no matching records were found in the right table, then its fields will contain the \nNULL values. In our example, we would get 4 records. 
In addition to INNER JOIN results, \nBUDWEISER will be listed, because it was in the left table.", "id": 218}, {"answer": "What is denormalization? Explain the pros and cons.", "question": "This type of join combines the results of LEFT and RIGHT joins. All records from both tables \nwill be part of the result set, whether the matching record exists in the other table or not. If \nno matching record was found then the corresponding result fields will have a NULL value.\nIn our example, we will get 6 records.", "id": 220}, {"answer": "Draw an entity-relationship diagram for a database with companies, people, and pro-fessionals (people who work for companies).", "question": "Denormalization is the process of attempting to optimize the performance of a database by \nadding redundant data or by grouping data. In some cases, denormalization helps cover up \nthe ine\u02ddciencies inherent in relational database software. A relational normalized database \nimposes a heavy access load over physical storage of data even if it is well tuned for high \nperformance.\nA normalized design will often store different but related pieces of information in separate \nlogical tables (called relations). If these relations are stored physically as separate disk files, \ncompleting a database query that draws information from several relations (a join operation) \ncan be slow. If many relations are joined, it may be prohibitively slow. There are two strate-gies for dealing with this. The preferred method is to keep the logical design normalized, but \nallow the database management \nsystem (DBMS) to store additional redundant information \non disk to optimize query response. In this case, it is the DBMS software\u2019s responsibility to \nensure that any redundant copies are kept consistent. This method is often implemented \nin SQL as indexed views (Microsoft SQL Server) or materialized views (Oracle). 
A view rep-resents information in a format convenient for querying, and the index ensures that queries \nagainst the view are optimized.\nThe more usual approach is to denormalize the logical data design. With care, this can \nachieve a similar improvement in query response, but at a cost\u2014it is now the database de-signer\u2019s responsibility to ensure that the denormalized database does not become inconsis-tent. This is done by creating rules in the database called constraints, that specify how the \nredundant copies of information must be kept synchronized. It is the increase in logical com-plexity of the database design and the added complexity of the additional constraints that \nmake this approach hazardous. Moreover, constraints introduce a trade-off, speeding up \nreads (SELECT in SQL) while slowing down writes (INSERT, UPDATE, and DELETE). This means \na denormalized database under heavy write load may actually offer worse performance than \nits functionally equivalent normalized counterpart.\nA denormalized data model is not the same as a data model that has not been normalized, \nand denormalization should only take place after a satisfactory level of normalization has \ntaken place and that any required constraints and/or rules have been created to deal with \nthe inherent anomalies in the design. For example, all the relations are in third normal form \nand any relations with join and multivalued dependencies are handled appropriately.\nFrom \nhttp://en.wikipedia.org/wiki/Denormalization", "id": 222}, {"answer": "Imagine a simple database storing information for students\u2019 grades. Design what this \ndatabase might look like, and provide a SQL query to return a list of the honor roll \nstudents (top 10%), sorted by their grade point average.", "question": "People who work for companies are Professionals. 
So there is an ISA (is a) relationship be-tween People and Professionals (or we could say that a Professional is derived from People).\nEach Professional has additional information such as degree, work experiences, etc, in addi-tion to the properties derived from People.\nA Professional works for one company at a time, but Companies can hire many Professionals, \nso there is a Many to One relationship between Professionals and Companies. This \u201cWorks \nFor\u201d relationship can store attributes such as date of joining the company, salary, etc. These \nattributes are only defined when we relate a Professional with a Company.\nA Person can have multiple phone numbers, which is why Phone is a multi-valued attribute.ProfessionalPeopleCompaniesWorks ForDegreeExperienceSalaryAddressCNameCIDDate of JoiningAddressPhoneISAN1DOBSexPNamePID", "id": 224}, {"answer": "Explain the following terms: virtual memory, page fault, thrashing.", "question": "In a simplistic database, we\u2019ll have at least these three objects: Students, Courses, and Cour-seEnrollment. Students will have at least the student name and ID, and will likely have other \npersonal information. Courses will contain the course name and ID, and will likely contain \nthe course description, professor, etc. CourseEnrollment will pair Students and Courses, and \nwill also contain a field for CourseGrade. We will assume that CourseGrade is an integer.\nOur SQL query to get the list of honor roll students might look like this:\n1 SELECT StudentName, GPA\n2 FROM (\n3 SELECT top 10 percent Avg(CourseEnrollment.Grade) AS GPA,\n4 CourseEnrollment.StudentID\n5 FROM CourseEnrollment\n6 GROUP BY CourseEnrollment.StudentID\n7 ORDER BY Avg(CourseEnrollment.Grade)) Honors \n8 INNER JOIN Students ON Honors.StudentID = Students.StudentID\nThis database could get arbitrarily more complicated if we wanted to add in professor infor-mation, billing, etc.", "id": 226}, {"answer": "What is a Branch Target buffer? 
Explain how it can be used in reducing bubble cycles \nin cases of branch misprediction.", "question": "Virtual memory\n is a computer system technique which gives an application program the im-pression that it has contiguous working memory (an address space), while in fact it may be \nphysically fragmented and may even overflow on to disk storage. Systems that use this tech-nique make programming of large applications easier and use real physical memory (e.g. \nRAM) more e\u02ddciently than those without virtual memory.\nhttp://en.wikipedia.org/wiki/Virtual_memory \nPage Fault: \nA page is a fixed-length block of memory that is used as a unit of transfer be-tween physical memory and external storage like a disk, and a page fault is an interrupt (or \nexception) to the software raised by the hardware, when a program accesses a page that is \nmapped in address space, but not loaded in physical memory.\nhttp://en.wikipedia.org/wiki/Page_fault\nThrash is the term used to describe a degenerate situation on a computer where increas-ing resources are used to do a decreasing amount of work. In this situation the system is \nsaid to be thrashing. Usually it refers to two or more processes accessing a shared resource \nrepeatedly such that serious system performance degradation occurs because the system is \nspending a disproportionate amount of time just accessing the shared resource. Resource \naccess time may generally be considered as wasted, since it does not contribute to the ad-vancement of any process. In modern computers, thrashing may occur in the paging system \n(if there is not \u201asu\u02ddcient\u2019 physical memory or the disk access time is overly long), or in the \ncommunications system (especially in conflicts over internal bus access), etc.\nhttp://en.wikipedia.org/wiki/Thrash_(computer_science)", "id": 228}, {"answer": "Describe direct memory access (DMA). 
Can a user level buffer / pointer be used by \nkernel or drivers?", "question": "Branch misprediction occurs when the CPU mispredicts the next instruction to be executed.\nThe CPU uses pipelining which allows several instructions to be processed simultaneously. \nBut during a conditional jump, the next instruction to be executed depends on the result of \nthe condition. Branch Prediction tries to guess the next instruction. However, if the guess is \nwrong, we are penalized because the instruction which was executed must be discarded.\nBranch Target Buffer (BTB) reduces the penalty by predicting the path of the branch, comput-ing the target of the branch and caching the information used by the branch. There will be \nno stalls if the branch entry found on BTB and the prediction is correct, otherwise the penalty \nwill be at least two cycles.", "id": 230}, {"answer": "Write a step by step execution of things that happen after a user presses a key on the \nkeyboard. Use as much detail as possible.", "question": "Direct Memory is a feature which provides direct access (read/write) to system memory with-out interaction from the CPU. The \u201cDMA Controller\u201d manages this by requesting the System \nbus access (DMA request) from CPU. CPU completes its current task and grants access by as-serting DMA acknowledgement signal. Once it gets the access, it reads/writes the data and \nreturns back the system bus to the CPU by asserting the bus release signal. This transfer is \nfaster than the usual transfer by CPU. Between this time CPU is involved with processing task \nwhich doesn\u2019t require memory access.\nBy using DMA, drivers can access the memory allocated to the user level buffer / pointer.", "id": 232}, {"answer": "Write a program to find whether a machine is big endian or little endian.", "question": "1. The keyboard sends a scan code of the key to the keyboard controller (Scan code for \nkey pressed and key released is different).\n2. 
The keyboard controller interprets the scan code and stores it in a buffer.\n3. The keyboard controller sends a hardware interrupt to the processor. This is done by \nputting signal on \u201cinterrupt request line\u201d: IRQ 1.\n4. The interrupt controller maps IRQ 1 into INT 9.\n5. An interrupt is a signal which tells the processor to stop what it was doing currently \nand do some special task.\n6. The processor invokes the \u201cInterrupt handler\u201d. CPU fetches the address of \u201cInterrupt \nService Routine\u201d (ISR) from \u201cInterrupt Vector Table\u201d maintained by the OS (Processor \nuse the IRQ number for this).\n7. The ISR reads the scan code from port 60h and decides whether to process it or pass \nthe control to program for taking action.", "id": 234}, {"answer": "Discuss how would you make sure that a process doesn\u2019t access an unauthorized part \nof the stack.", "question": "1 \n2 \n3 int TestByteOrder() { \n4 short int word = 0x0001; \n5 char *byte = (char *) &word; \n6 return (byte[0] ? LITTLE_ENDIAN : BIG_ENDIAN); \n7 }", "id": 236}, {"answer": "What are the best practices to prevent reverse engineering of DLLs?", "question": "As with any ambiguously worded interview question, it may help to probe the interviewer to \nunderstand what specifically you\u2019re intended to solve. Are you trying to prevent code that \nhas overflowed a buffer from compromising the execution by overwriting stack values? Are \nyou trying to maintain some form of \nthread-specific isolation between threads? Is the code \nof interest native code like C++ or running under a virtual machine like Java? \nRemember that, in a multi-threaded environment, there can be multiple stacks in a process. \nNATIVE CODE \nOne threat to the stack is malicious program input, which can overflow a buffer and over-write stack pointers, thus circumventing the intended execution of the program. 
\nIf the interviewer is looking for a simple method to reduce the risk of buffer overflows in \nnative code, modern compilers provide this sort of stack protection through a command \nline option. With Microsoft\u2019s CL, you just pass /GS to the compiler. With GCC, you can use -fstack-protector-all. \nFor more complex schemes, you could set individual permissions on the range of memory \npages representing the stack section you care about. In the Win32 API, you\u2019d use the Virtu-alProtect API to mark the page PAGE_READONLY or PAGE_NOACCESS. This will cause the \ncode accessing the region to go through an exception on access to the specific section of \nthe stack. \nAlternately, you could use the HW Debug Registers (DRs) to set a read or write breakpoint \non the specific memory addresses of interest. A separate process could be used to debug \nthe process of interest, catch the HW exception that would be generated if this section of the \nstack were accessed.\nHowever, it\u2019s very important to note that under normal circumstances, threads and processes \nare not means of access control. Nothing can prevent native code from writing anywhere \nwithin the address space of its process, including to the stack. Specifically, there is nothing \nto prevent malicious code in the process from calling VirtualProtect and marking the stack \nsections of interest PAGE_EXECUTE_READWRITE. Equally so, nothing prevents code from \nzeroing out the HW debug registers, eliminating your breakpoints. In summary, nothing can \nfully prevent native code from accessing memory addresses, including the stack, within its \nown process space. \nMANAGED CODE\n \n\nA final option is to consider requiring this code that should be \u201csandboxed\u201d to run in a man-aged language like Java or C# / .NET. By default, the virtual machines running managed code \nin these languages make it impossible to gain complete access to the stack from within the \nprocess. 
\nOne can use further security features of the runtimes to prevent the code from spawning ad-ditional processes or running \u201cunsafe\u201d code to inspect the stack. With .NET, for example, you \ncan use Code Access Security (CAS) or appdomain permissions to control such execution.", "id": 238}, {"answer": "A device boots with an empty FIFO queue. In the first 400 ns period after startup, \nand in each subsequent 400 ns period, a maximum of 80 words will be written to the \nqueue. Each write takes 4 ns. A worker thread requires 3 ns to read a word, and 2 ns to \nprocess it before reading the next word. What is the shortest depth of the FIFO such \nthat no data is lost?", "question": "Best practices include the following:\n \n\u00bb Use obfuscators.\n \n\u00bb Do not store any data (string, etc) in open form. Always compress or encode it.\n \n\u00bb Use a static link so there is no DLL to attack.\n \n\u00bb Strip all symbols.\n \n\u00bb Use a .DEF file and an import library to have anonymous exports known only by their \nexport ids.\n \n\u00bb Keep the DLL in a resource and expose it in the file system (under a suitably obscure \nname, perhaps even generated at run time) only when running.\n \n\u00bb Hide all real functions behind a factory method that exchanges a secret (better, proof of \nknowledge of a secret) for a table of function pointers to the real methods.\n \n\u00bb Use anti-debugging techniques borrowed from the malware world to prevent reverse \nengineering. 
(Note that this will likely get you false positives from AV tools.)\n \n\u00bb Use protectors.", "id": 240}, {"answer": "WriterAAAABBBBCCCCDDDDEEEEXXXXYYYYZZZZ____\nWorker____aaaaabbbbbcccccdopppppqqqqqrrrrr\nQueue \nLoad111122212222222232223333333343333322 *Y = Writing word 159 @ 712 nsZ = Writing word 160 @ 716 nsq = Processing word 127 @ 714 nsr = Processing word 128* = Between 708 and 723 ns, the queue load is shown as 30 plus the digit shown at each ns.\nNote that the queue load does in fact reach a maximum of 34 at time = 716 ns. \nAs an interesting note, if the problem had required only 2 ns of the 5 ns processing time to \ncomplete a read, the optimal queue depth would decrease to 33.\nThe below graphs are unnecessary, but show empirically that adding writes from the 3rd \nperiod does not change the queue depth required.", "question": "While a perfectly optimal solution is complex, an interviewer is most interested in how you \napproach the problem.\nTHEORY\nFirst, note that writes do not have to be evenly distributed within a period. Thus a likely worst \ncase is 80 words are written at the end of the first period, followed by 80 more at the start of \nthe next.\nNote that the maximum write rate for a full period is exactly matched by a full period of pro-cessing (400 ns / ((3 ns + 2 ns)/process) = 80 processed words/period).\nAs the 2nd period in our example is fully saturated, adding writes from a 3rd period would \nnot add additional stress, and this example is a true worst case for the conditions.\nA SAFE QUEUE DEPTH\nFor an estimate of maximum queue size, notice that these 160 writes take 640 ns (160 writes \n* 4 ns / write = 640 ns), during which time only 128 words have been read (640 ns / ((3 ns + 2 \nns) / word) = 128 words). 
However, the first read cannot start until the first write has finished, \nwhich fills an extra slot in the queue.\nAlso, depending on the interactions between read and write timing, a second additional slot \nmay be necessary to ensure a write does not trash the contents of a concurrently occurring \nread. Thus, a safe estimate is that the queue must be at least 34 words deep (160 - 128 + 1 + \n1 = 34) to accommodate the unread words.\nFINDING AN OPTIMAL (MINIMAL) QUEUE DEPTH\nDepending on the specifics of the problem, it\u2019s possible that the final queue spot could be \nsafely removed. In many cases, the time required to do an edge case analysis to determine \nsafety is not worth the effort. However, if the interviewer is interested, the full analysis fol-lows.\nWe are interested in the exact queue load during the final (160th) consecutive write to the \nqueue. We can approach this by graphing the queue load from time = 0 ns, observing the \npattern, and extending it to time = 716 ns, the time of the final consecutive write.\nThe graph below shows that the queue load increases as each write begins, and decreases \n\n3 ns after a read begins. Uninteresting time segments are surrounded by [brackets]. Each \ncharacter represents 1 ns.", "id": 242}, {"answer": "WriterYYYYZZZZ____\nWorker^^&&&&&%%%%%\nQueue Load333343333322 *Z = Writing word 240 @ 1116 ns& = Processing word 207 @ 1114 ns* = Between 1112 and 1123 ns, the queue load is shown as 30 plus the digit shown at each ns.", "question": "Writer____AAAABBBB!!@@@@####$$\nWorker^^^&&&&&****yyyyyzzzzzaa\nQueue Load877788778887112111221122 *A = Writing word 161& = Processing word 144# = Writing word 181z = Processing word 160 @ 779 ns* = Between 874 and 885 ns, the queue load is shown as 20 plus the digit shown at each ns.", "id": 244}, {"answer": "1. We will use malloc routine provided by C to implement the functionality. \nAllocate memory of size (bytes required + alignment \u2013 1 + sizeof(void*)) using malloc. 
\nalignment: malloc can give us any address and we need to find a multiple of align-ment. \n(Therefore, at maximum multiple of alignment, we will be alignment-1 bytes away \nfrom any location.)\nsizeof(size_t): We are returning a modified memory pointer to user, which is different \nfrom the one that would be returned by malloc. We also need to extra space to store \nthe address given by malloc, so that we can free memory in aligned_free by calling \nfree routine provided by C. \n2. If it returns NULL, then aligned_malloc will fail and we return NULL.\n3. Else, find the aligned memory address which is a multiple of alignment (call this p2).\n4. Store the address returned by malloc (e.g., p1 is just size_t bytes ahead of p2), which \nwill be required by aligned_free.\n5. Return p2.\n1 void* aligned_malloc(size_t required_bytes, size_t alignment) {\n2 void* p1; // original block\n3 void** p2; // aligned block\n4 int offset = alignment - 1 + sizeof(void*);\n5 if ((p1 = (void*)malloc(required_bytes + offset)) == NULL) {\n6 return NULL;\n7 }\n8 p2 = (void**)(((size_t)(p1) + offset) & ~(alignment - 1));\n9 p2[-1] = p1;\n10 return p2;\n11 }\n12 void aligned_free(void *p) {\n13 free(((void**)p)[-1]);\n14 }", "question": "Write an aligned malloc & free function that takes number of bytes and aligned byte \n(which is always power of 2) \nEXAMPLE\nalign_malloc (1000,128) will return a memory address that is a multiple of 128 and \nthat points to memory of size 1000 bytes. \naligned_free() will free memory allocated by align_malloc.", "id": 246}, {"answer": "We will use one call to malloc.\nAllocate one block of memory to hold the row vector and the array data. The row vector will \nreside in rows * sizeof(int*) bytes. 
The integers in the array will take up another rows * cols * \nsizeof(int) bytes.\nConstructing the array in a single malloc has the added benefit of allowing disposal of the \narray with a single free call rather \nthan using a special function to free the subsidiary data \nblocks.\n1 #include <malloc.h>\n2 \n3 int** My2DAlloc(int rows, int cols) {\n4 int header = rows * sizeof(int*);\n5 int data = rows * cols * sizeof(int);\n6 int** rowptr = (int**)malloc(header + data);\n7 int* buf = (int*)(rowptr + rows);\n8 int k;\n9 for (k = 0; k < rows; ++k) {\n10 rowptr[k] = buf + k*cols;\n11 }\n12 return rowptr;\n13 }", "question": "Write a function called my2DAlloc which allocates a two dimensional array. Minimize \nthe number of calls to malloc and make sure that the memory is accessible by the \nnotation arr[i][j].", "id": 248}, {"answer": "There\u2019s no right, or even complete, answer for this question. This question allows you to go \ninto arbitrary amounts of detail depending on what you\u2019re comfortable with. Here\u2019s a start \nthough:\n1. Browser contacts the DNS server to find the IP address of URL.\n2. DNS returns back the IP address of the site.\n3. Browser opens TCP connection to the web server at port 80.\n4. Browser fetches the html code of the page requested.\n5. Browser renders the HTML in the display window.\n6. Browser terminates the connection when window is closed.\nOne of the most interesting steps is Step 1 and 2 - \u201cDomain Name Resolution.\u201d The web ad-dresses we type are nothing but an alias to an IP address in human readable form. Mapping \nof domain names and their associated Internet Protocol (IP) addresses is managed by the \nDomain Name System (DNS), which is a distributed but hierarchical entity.\nEach domain name server is divided into zones. 
A single server may only be responsible for \nknowing the host names and IP addresses for a small subset of a zone, but DNS servers can \nwork together to map all domain names to their IP addresses. That means if one domain \nname server is unable to find the IP addresses of a requested domain then it requests the \ninformation from other domain name servers.", "question": "Explain what happens, step by step, after you type a URL into a browser. Use as much \ndetail as possible.", "id": 250}, {"answer": "Depending on the reader\u2019s level of understanding, knowledge, interest or career aspirations, \nhe or she may wish to explore beyond what is included here. Wikipedia and other websites \nare great places to look for a deeper understanding. We will provide only a short summary.\nBGP: Border Gateway Protocol\nBGP is the core routing protocol of the Internet. \u201cWhen a BGP router first comes up on the \nInternet, either for the first time or after being turned off, it establishes connections with \nthe other BGP routers with which it directly communicates. The first thing it does is down-load the entire routing table of each neighboring router. After that it only exchanges much \nshorter update messages with other routers.\nBGP routers send and receive update messages to indicate a change in the preferred path \nto reach a computer with a given IP address. 
If the router decides to update its own routing \ntables because this new path is better, then it will subsequently propagate this information \nto all of the other neighboring BGP routers to which it is connected, and they will in turn \ndecide whether to update their own tables and propagate the information further.\u201d\nBorrowed from http://www.livinginternet.com/i/iw_route_egp_bgp.htm.\nRIP: Routing Information Protocol \n\u201cRIP provides the standard IGP protocol for local area networks, and provides great network \nstability, guaranteeing that if one network connection goes down the network can quickly \nadapt to send packets through another connection. \u201c\n\u201cWhat makes RIP work is a routing database that stores information on the fastest route \nfrom computer to computer, an update process that enables each router to tell other rout-ers which route is the fastest from its point of view, and an update algorithm that enables \neach router to update its database with the fastest route communicated from neighboring \nrouters.\u201d\nBorrowing from http://www.livinginternet.com/i/iw_route_igp_rip.htm.\nOSPF: Open Shortest Path First\n\u201cOpen Shortest Path First (OSPF) is a particularly e\u02ddcient IGP routing protocol that is faster \nthan RIP, but also more complex.\u201d \nThe main difference between OSPF and RIP is that RIP only keeps track of the closest router \nfor each destination address, while OSPF keeps track of a complete topological database of \nall connections in the local network. The OSPF algorithm works as described below.\n\n \n\u00bb Startup. When a router is turned on it sends Hello packets to all of its neighbors, re-ceives their Hello packets in return, and establishes routing connections by synchroniz-ing databases with adjacent routers that agree to synchronize.\n \n\u00bb Update. 
At regular intervals each router sends an update message called its \u201clink state\u201d \ndescribing its routing database to all the other routers, so that all routers have the same \ndescription of the topology of the local network.\n \n\u00bb Shortest path tree. Each router then calculates a mathematical data structure called a \n\u201cshortest path tree\u201d that describes the shortest path to each destination address and \ntherefore indicates the closest router to send to for each communication; in other words -- \u201copen shortest path first\u201d.\nSee http://www.livinginternet.com/i/iw_route_igp_ospf.htm.", "question": "Explain any common routing protocol in detail. For example: BGP, OSPF, RIP.", "id": 252}, {"answer": "IPv4 and IPv6 are the internet protocols applied at the network layer. IPv4 is the most widely \nused protocol right now and IPv6 is the next generation protocol for internet.\n \n\u00bb IPv4 is the fourth version of Internet protocol which uses 32 bit addressing whereas IPv6 \nis a next generation internet protocol which uses 128 bits addressing.\n \n\u00bb IPv4 allows 4,294,967,296 unique addresses where as IPv6 can hold 340-undecillion (34, \n000, 000, 000, 000, 000, 000, 000, 000, 000, 000, 000, 000) unique IP addresses.\n \n\u00bb IPv4 has different class types: A,B,C,D and E. Class A, Class B, and Class C are the three \nclasses of addresses used on IP networks in common practice. Class D addresses are \nreserved for multicast. Class E addresses are simply reserved, meaning they should not \nbe used on IP networks (used on a limited basis by some research organizations for \nexperimental purposes).\n \n\u00bb IPv6 addresses are broadly classified into three categories:\n1. Unicast addresses: A Unicast address acts as an identifier for a single interface. An \nIPv6 packet sent to a Unicast address is delivered to the interface identified by \nthat address.\n2. 
Multicast addresses: A Multicast address acts as an identifier for a group / set of \ninterfaces that may belong to the different nodes. An IPv6 packet delivered to a \nmulticast address is delivered to the multiple interfaces.\n3. Anycast addresses: Anycast addresses act as identifiers for a set of interfaces that \nmay belong to the different nodes. An IPv6 packet destined for an Anycast ad-dress is delivered to one of the interfaces identified by the address.\n \n\u00bb IPv4 address notation: 239.255.255.255, 255.255.255.0\n \n\u00bb IPv6 addresses are denoted by eight groups of hexadecimal quartets separated by co-lons in between them.\n \n\u00bb An example of a valid IPv6 address: 2001:cdba:0000:0000:0000:0000:3257:9652\nBecause of the increase in the population, there is a need of Ipv6 protocol which can provide \nsolution for: \n1. Increased address space \n2. More e\u02ddcient routing \n3. Reduced management requirement \n\n4. Improved methods to change ISP \n5. Better mobility support \n6. Multi-homing \n7. Security \n8. Scoped address: link-local, site-local and global-address space", "question": "Compare and contrast the IPv4 and IPv6 protocols.", "id": 254}, {"answer": "A mask is a bit pattern used to identify the network/subnet address. The IP address consists \nof two \ncomponents: the network address and the host address.\nThe IP addresses are categorized into different classes which are used to identify the network \naddress.\nExample\n: Consider IP address 152.210.011.002. This address belongs to Class B, so:Network Mask: 11111111.11111111.00000000.00000000Given Address: 10011000.11010101.00001011.00000010\nBy ANDing Network Mask and IP Address, we get the following network address:10011000.11010101.00000000.00000000 (152.210.0.0)Host address: 00001011.00000010\nSimilarly, a network administrator can divide any network into sub-networks by using subnet \nmask. 
To do this, we further divide the host address into two or more subnets.\nFor example, if the above network is divided into 18 subnets (requiring a minimum of 5 bits \nto represent 18 subnets), the first 5 bits will be used to identify the subnet address.\nSubnet Mask: 11111111.11111111.11111000.00000000 (255.255.248.0)\nGiven Address: 10011000.11010101.00001011.00000010\nSo, by ANDing the subnet mask and the given address, we get the following subnet address: \n10011000.11010101.00001000.00000000 (152.210.1.0)\nHow Host A sends a message/packet to Host B:\nWhen both are on same network: the host address bits are used to identify the host within \nthe network.\nBoth are on different networks: the router uses the network mask to identify the network and \nroute the packet. The host can be identified using the network host address.\nThe network layer is responsible for making routing decisions. A routing table is used to \nstore the path information and the cost involved with that path, while a routing algorithm \nuses the routing table to decide the path on which to route the packets.\nRouting is broadly classified into Static and Dynamic Routing based on whether the table is \nfixed or it changes based on the current network condition.", "question": "What is a network / subnet mask? Explain how host A sends a message / packet to \nhost B when:\n (\na) both are on same network and (b) both are on different networks. \nExplain which layer makes the routing decision and how.", "id": 256}, {"answer": "TCP\n \n(Transmission Control Protocol)\n: TCP is a connection-oriented protocol. A connection can \nbe made from client to server, and from then on any data can be sent along that connection.\n \n\u00bb Reliable - when you send a message along a TCP socket, you know it will get there unless \nthe connection fails completely. If it gets lost along the way, the server will re-request \nthe lost part. 
This means complete integrity; data will not get corrupted.\n \n\u00bb Ordered - if you send two messages along a connection, one after the other, you know \nthe first message will get there first. You don\u2019t have to worry about data arriving in the \nwrong order.\n \n\u00bb Heavyweight - when the low level parts of the TCP \u201cstream\u201d arrive in the wrong order, re-send requests have to be sent. All the out of sequence parts must be put back together, \nwhich requires a bit of work.\nUDP(User Datagram Protocol): \nUDP is connectionless protocol. With UDP you send messages \n(packets) across the network in chunks.\n \n\u00bb Unreliable - When you send a message, you don\u2019t know if it\u2019ll get there; it could get lost \non the way.\n \n\u00bb Not ordered - If you send two messages out, you don\u2019t know what order they\u2019ll arrive in.\n \n\u00bb Lightweight - No ordering of messages, no tracking connections, etc. It\u2019s just fire and \nforget! This means it\u2019s a lot quicker, and the network card / OS have to do very little work \nto translate the data back from the packets.\nExplain how TCP handles reliable delivery (explain ACK mechanism), flow control (explain TCP \nsender\u2019s/receiver\u2019s window).\nFor each TCP packet, the receiver of a packet must acknowledge that the packet is received. \nIf there is no acknowledgement, the packet is sent again. These guarantee that every single \npacket is delivered. ACK is a packet used in TCP to acknowledge receipt of a packet. 
A TCP \nwindow is the amount of outstanding (unacknowledged by the recipient) data a sender can \nsend on a particular connection before it gets an acknowledgment back from the receiver \nthat it has gotten some of it.\nFor example, if a pair of hosts are talking over a TCP connection that has a TCP window with \na size of 64 KB, the sender can only send 64 KB of data and then it must wait for an acknowl-edgment from the receiver that some or all of the data has been received. If the receiver \n\nacknowledges that all the data has been received, then the sender is free to send another 64 \nKB. If the sender gets back an acknowledgment from the receiver that it received the first \n32 KB (which could happen if the second 32 KB was still in transit or it could happen if the \nsecond 32 KB got lost), then the sender can only send another additional 32 KB since it can\u2019t \nhave more than 64 KB of unacknowledged data outstanding (the second 32 KB of data plus \nthe third).\nCongestion Control\nThe TCP uses a network congestion avoidance algorithm that includes various aspects of an \nadditive-increase-multiplicative-decrease scheme, with other schemes such as slow-start in \norder to achieve congestion avoidance.\nThere are different algorithms to solve the problem; Tahoe and Reno are the most well \nknown. To avoid congestion collapse, TCP uses a multi-faceted congestion control strategy. \nFor each connection, TCP maintains a congestion window, limiting the total number of unac-knowledged packets that may be in transit end-to-end. This is somewhat analogous to TCP\u2019s \nsliding window used for flow control. TCP uses a mechanism called slow start to increase the \ncongestion window after a connection is initialized and after a timeout. It starts with a win-dow of two times the maximum segment size (MSS). 
Although the initial rate is low, the rate \nof increase is very rapid: for every packet acknowledged, the congestion window increases \nby 1 MSS so that for every round trip time (RTT), the congestion window has doubled. When \nthe congestion window exceeds a threshold ssthresh the algorithm enters a new state, called \ncongestion avoidance. In some implementations (i.e., Linux), the initial ssthresh is large, and \nso the first slow start usually ends after a loss. However, ssthresh is updated at the end of \neach slow start, and will often affect subsequent slow starts triggered by timeouts.", "question": "What are the differences between TCP and UDP? Explain how TCP handles reliable \ndelivery (explain ACK mechanism), flow control (explain TCP sender\u2019s / receiver\u2019s win-dow) and congestion control.", "id": 258}, {"answer": "Processes and threads are related to each other but are fundamentally different. \nA process can be thought of as an instance of a program in execution. Each process is an in-dependent entity to which system resources (CPU time, memory, etc.) are allocated and each \nprocess is executed in a separate address space. One process cannot access the variables \nand data structures of another process. If you wish to access another process\u2019 resources, \ninter-process communications have to be used such as pipes, files, sockets etc. \nA thread uses the same stack space of a process. A process can have multiple threads. A key \ndifference between processes and threads\n is that multiple threads share parts of their state. \nTypically, one allows multiple threads to read and write the same memory (no processes can \ndirectly access the memory of another process). However, each thread still has its own regis-ters and its own stack, but other threads can read and write the stack memory. 
\nA thread is a particular execution path of a process; when one thread modifies a process \nresource, the change is immediately visible to sibling threads.", "question": "What\u2019s the difference between a thread and a process?", "id": 260}, {"answer": "This is a tricky question, but let\u2019s start with a possible solution.\nA context switch is the time spent switching between two processes (e.g., bringing a wait-ing process into execution and sending an executing process into waiting/terminated state). \nThis happens in multitasking. The operating system must bring the state \ninformation of \nwaiting processes into memory and save the state information of the running process.\nIn order to solve this problem, we would like to record timestamps of the last and first in-struction of the swapping processes. The context switching time would be the difference in \nthe timestamps between the two processes.\nLet\u2019s take an easy example: Assume there are only two processes, P1 and P2.\nP1 is executing and P2 is waiting for execution. At some point, the OS must swap P1 and \nP2\u2014let\u2019s assume it happens at the Nth instruction of P1. So, the context switch time for this \nwould be Time_Stamp(P2_1) \u2013 Time_Stamp(P2_N)\nEasy enough. The tricky part is this: how do we know when this swapping occurs? Swap-ping is governed by the scheduling algorithm of the OS. We can not, of course, record the \ntimestamp of every instruction in the process. \nAnother issue: there are many kernel level threads which are also doing context switches, \nand the user does not have any control over them.\nOverall, we can say that this is mostly an approximate calculation which depends on the \nunderlying OS. 
One approximation could be to record the end instruction timestamp of a \nprocess and start timestamp of a process and waiting time in queue.\nIf the total timeof execution of all the processes was T, then the context switch time = T \u2013 \n(SUM for all processes (waiting time + execution time)).", "question": "How can you measure the time spent in a context switch?", "id": 262}, {"answer": "1 using namespace std;\n2 /* Place holder for thread synchronization lock */\n3 class Lock {\n4 public:\n5 Lock() { /* placeholder code to create the lock */ }\n6 ~Lock() { /* placeholder code to deallocate the lock */ }\n7 void AcquireLock() { /* placeholder to acquire the lock */ }\n8 void ReleaseLock() { /* placeholder to release the lock */ }\n9 };\n10 \n11 /* Singleton class with a method that creates a new instance of the \n12 * class of the type of the passed in template if it does not \n13 * already exist. */\n14 template <class T> class Singleton {\n15 private:\n16 static Lock lock;\n17 static T* object;\n18 protected:\n19 Singleton() { };\n20 public:\n21 static T * instance();\n22 }; \n23 Lock Singleton::lock;\n24 \n25 T * Singleton::Instance() {\n26 /* if object is not initialized, acquire lock */\n27 if (object == 0) {\n28 lock.AcquireLock();\n29 \n30 \n31 * should create the instance */\n32 if (object == 0) {\n33 object = new T;\n34 }\n35 lock.ReleaseLock();\n36 }\n37 return object;\n38 }\n\n39 \n40 int main() {\n41 \n\n42 Foo* singleton_foo = Singleton<Foo>::Instance(); \n43 return 0;\n44 }\nThe general method to make a program thread safe is to lock shared resources whenever \nwrite permission is given. This way, if one thread is modifying the resource, other threads \ncan not modify it.", "question": "Implement a singleton design pattern as a template such that, for any given class \nFoo, you can call Singleton::instance() and get a pointer to an instance of a singleton \nof type Foo. 
Assume the existence of a class Lock which has acquire() and release() \nmethods. How could you make your implementation thread safe and exception safe?", "id": 264}, {"answer": "For our solution, we implement a wait / die deadlock prevention scheme.\n1 class MyThread extends Thread {\n2 long time;\n3 ArrayList<Resource> res = new ArrayList<Resource>(); \n4 public ArrayList<Resource> getRes() { return res; }\n5 \n6 public void run() {\n7 \n8 time = System.currentTimeMillis();\n9 int count = 0;\n10 while (true) {\n11 if (count < 4) {\n12 if (Question.canAcquireResource(this, \n13 Question.r[count])) {\n14 res.add(Question.r[count]);\n15 count++;\n16 System.out.println(\u201cResource: [\u201c + \n17 Question.r[count - 1].getId() + \u201c] acquired by \n18 thread: [\u201c + this.getName() + \u201c]\u201d);\n19 try {\n20 sleep(1000);\n21 } catch (InterruptedException e) {\n22 e.printStackTrace();\n23 }\n24 }\n25 }\n26 else {\n27 this.stop();\n28 }\n29 }\n30 }\n31 \n32 public long getTime() { return time; }\n33 public void setRes(ArrayList<Resource> res) { this.res = res; }\n34 MyThread(String name) {\n35 super(name);\n36 }\n37 }", "question": "Design a class which provides a lock only if there are no possible deadlocks.", "id": 266}, {"answer": "i) Can you design a mechanism to make sure that B is executed after A, and C is executed after B?\n1 Semaphore s_a(0);\n2 Semaphore s_b(0);\n3 A {\n4 /***/\n5 s_a.release(1);\n6 }\n7 B {\n8 s_a.acquire(1);\n9 /****/\n10 s_b.release(1);\n11 }\n12 C {\n13 s_b.acquire(1);\n14 /******/\n15 }\nii) Can you design a mechanism to make sure that all the methods will be executed in sequence?\n1 Semaphore s_a(0);\n\n2 Semaphore s_b(0);\n3 Semaphore s_c(1);\n4 A {\n5 s_c.acquire(1);\n6 /***/\n7 s_a.release(1);\n8 }\n9 B {\n10 s_a.acquire(1);\n11 /****/\n12 s_b.release(1);\n13 }\n14 C {\n15 s_b.acquire(1);\n16 /******/\n17 s_c.release(1);\n18 }", "question": "Suppose we have the following code: class Foo { public: A(.....); /* If A is 
called, a new thread will be created and * the corresponding function will be executed. */ B(.....); /* same as above */ C(.....); /* same as above */ } Foo f; f.A(.....); f.B(.....); f.C(.....);\ni) Can you design a mechanism to make sure that B is executed after A, and C is ex-ecuted after B?\niii) Suppose we have the following code to use class Foo. We do not know how the \nthreads will be scheduled in the OS. Foo f; f.A(.....); f.B(.....); f.C(.....); f.A(.....); f.B(.....); f.C(.....);\nCan you design a mechanism to make sure that all the methods will be executed in \nsequence?", "id": 268}, {"answer": "Java provides two ways \nto achieve synchronization: synchronized method and synchronized \nstatement.\nSynchronized method: Methods of a class which need to be synchronized are declared with \n\u201csynchronized\u201d keyword. If one thread is executing a synchronized method, all other threads \nwhich want to execute any of the synchronized methods on the same objects get blocked.\nSyntax: method1 and method2 need to be synchronized\n1 public class SynchronizedMethod {\n2 // Variables declaration\n3 public synchronized returntype Method1() {\n4 // Statements\n5 }\n6 public synchronized returntype method2() {\n7 // Statements\n8 }\n9 // Other methods\n10 }\nSynchronized statement: It provides the synchronization for a group of statements rather \nthan a method as a whole. It needs to provide the object on which these synchronized state-ments will be applied, unlike in a synchronized method.\nSyntax: synchronized statements on \u201cthis\u201d object\n1 synchronized(this) {\n2 /* statement 1\n3 * ...\n4 * statement N */\n5 } \ni) If you have two threads in one instance of a program, can they call A at the same time?\nNot possible; read the above paragraph.\nii) Can they call A and C at the same time?\nYes. 
Only methods of the same object which are declared with the keyword synchronized \ncan\u2019t be interleaved.", "question": "You are given a class with synchronized method A, and a normal method C. If you \nhave two threads in one instance of a program, can they call A at the same time? Can \nthey call A and C at the same time?", "id": 270}, {"answer": "This is a classic interview problem. If you haven\u2019t heard this problem before, you can ap-proach it by taking the \ndifference between a and b:\n1 public static void swap(int a, int b) {\n2 a = b - a; // 9 - 5 = 4\n3 b = b - a; // 9 - 4 = 5\n4 a = a + b; // 4 + 5 = 9\n5 \n6 System.out.println(\u201ca: \u201c + a);\n7 System.out.println(\u201cb: \u201c + b);\n8 }\nYou can then optimize it as follows:\n1 public static void swap_opt(int a, int b) {\n2 a = a^b; \n3 b = a^b; \n4 a = a^b; \n5 \n6 System.out.println(\u201ca: \u201c + a);\n7 System.out.println(\u201cb: \u201c + b);\n8 }", "question": "Write a function to swap a number in place without temporary variables.", "id": 272}, {"answer": "The first thing to ask your interviewer is whether the hasWon function will be called just \nonce, or multiple times. If it will be called multiple times, you can get a very fast algorithm \nby amortizing the cost (especially if you can design your own data storage system for the \ntic-tac-toe board).\nApproach #1: If hasWon is called many times\nThere are only 3^9, or about \ntwenty thousand tic-tac-toe boards. We can thus represent our \ntic-tac-toe board as an int, with each digit representing a piece (0 means Empty, 1 means \nRed, 2 means Blue). We set up a hashtable or array in advance with all possible boards as \nkeys, and the values are 0, 1, and 2. 
Our function then is simply this:int hasWon(int board) { return winnerHashtable[board];}\nEasy!\nApproach #2: If hasWon is only called once\n1 enum Piece { Empty, Red, Blue };\n2 enum Check { Row, Column, Diagonal, ReverseDiagonal }\n3 \n4 Piece getIthColor(Piece[][] board, int index, int var, Check check) {\n5 if (check == Check.Row) return board[index][var];\n6 else if (check == Check.Column) return board[var][index];\n7 else if (check == Check.Diagonal) return board[var][var];\n8 else if (check == Check.ReverseDiagonal) \n9 return board[board.length - 1 - var][var];\n10 return Piece.Empty;\n11 }\n12 \n13 \n14 \n\n15 if (color == Piece.Empty) return Piece.Empty;\n16 for (int var = 1; var < board.length; var++) { \n17 \n18 return Piece.Empty;\n19 } \n20 }\n21 return color;\n22 }\n23 \n\n24 Piece hasWon(Piece[][] board) {\n25 int N = board.length;\n26 Piece winner = Piece.Empty;\n27 \n28 // Check rows and columns\n29 for (int i = 0; i < N; i++) {\n30 winner = getWinner(board, i, Check.Row);\n31 if (winner != Piece.Empty) {\n32 return winner;\n33 }\n34 \n35 winner = getWinner(board, i, Check.Column);\n36 if (winner != Piece.Empty) {\n37 return winner;\n38 }\n39 } \n40 \n41 winner = getWinner(board, -1, Check.Diagonal);\n42 if (winner != Piece.Empty) {\n43 return winner;\n44 }\n45 \n46 // Check diagonal\n47 winner = getWinner(board, -1, Check.ReverseDiagonal);\n48 if (winner != Piece.Empty) {\n49 return winner;\n50 } \n51 \n52 return Piece.Empty;\n53 }\n \n \n\u00bb Note that the runtime could be reduced to \nO(N) with the addition of row and column \ncount arrays (and two sums for the diagonals)\n \n\u00bb A common follow up (or tweak) to this question is to write this code for an NxN board.", "question": "Design an algorithm to figure out if someone has won in a game of tic-tac-toe.", "id": 274}, {"answer": "Trailing zeros are contributed by pairs of 5 and 2, because 5*2 = 10. To count the number of \npairs, we just have to count the number of multiples of 5. 
Note that while 5 contributes to \none multiple of 10, 25 contributes two (because 25 = 5*5).\n1 public static int numZeros(int num) {\n2 int count = 0;\n3 if (num < 0) {\n4 \n5 return 0;\n6 }\n7 for (int i = 5; num / i > 0; i *= 5) {\n8 count += num / i;\n9 }\n10 return count;\n11 }\nLet\u2019s walk through an example to see how this works: Suppose num = 26. In the first loop, we \ncount how many multiples of five there are by doing 26 / 5 = 5 (these multiples are 5, 10, 15, \n20, and 25). In the next loop, we count how many multiples of 25 there are: 26 / 25 = 1 (this \nmultiple is 25). Thus, we see that we get one zero from 5, 10, 15 and 20, and two zeros from \n25 (note how it was counted twice in the loops). Therefore, 26! has six zeros.\n \n\u00bb This is a bit of a brain teaser, but it can be approached logically (as shown above). By \nthinking through what exactly will contribute a zero, and what doesn\u2019t matter, you can \ncome up with a solution. Again, be very clear in your rules up front so that you can \nimplement this correctly.", "question": "Write an algorithm which computes the number of trailing zeros in n factorial.", "id": 276}, {"answer": "Let\u2019s try to solve this by \u201cre-wording\u201d the problem. We will re-word the problem until we get \nsomething that has removed all if statements.\nRewording 1: If a >\n b, return a; else, return b.\nRewording 2: If (a - b) is negative, return b; else, return a.\nRewording 3: If (a - b) is negative, let k = 1; else, let k = 0. Return a - k * (a - b).\nRewording 4: Let c = a - b. Let k = the most significant bit of c. Return a - k * c.\nWe have now reworded the problem into something that fits the requirements. The code \nfor this is below.\n1 int getMax(int a, int b) {\n2 int c = a - b;\n3 int k = (c >> 31) & 0x1;\n4 int max = a - k * c;\n5 return max;\n6 }", "question": "Write a method which finds the maximum of two numbers. 
You should not use if-else \nor any other comparison operator.\nEXAMPLE\nInput: 5, 10\nOutput: 10", "id": 278}, {"answer": "This problem is straight-forward. We simply check the number of hits and pseudo-hits. We \nwill store the number of each\n in a class. To do a quick lookup to see it an element is a pseudo-hit, we will use a bit mask.\n1 public static class Result {\n2 public int hits;\n3 public int pseudoHits;\n4 };\n5 \n6 public static Result estimate(String guess, String solution) {\n7 Result res = new Result();\n8 int solution_mask = 0;\n9 for (int i = 0; i < 4; ++i) {\n10 solution_mask \n11 }\n12 for (int i = 0; i < 4; ++i) {\n13 if (guess.charAt(i) == solution.charAt(i)) {\n14 ++res.hits;\n15 } else if ((solution_mask & \n16 (1 << (1 + guess.charAt(i) - \u201aA\u2019))) >= 1) {\n17 ++res.pseudoHits;\n18 }\n19 }\n20 return res;\n21 }", "question": "The Game of Master Mind is played as follows: \nThe computer has four slots containing balls that are red (R), yellow (Y), green (G) or \nblue (B). For example, the computer might have RGGB (e.g., Slot #1 is red, Slots #2 and \n#3 are green, Slot #4 is blue).\nYou, the user, are trying to guess the solution. You might, for example, guess YRGB.\nWhen you guess the correct color for the correct slot, you get a \u201chit\u201d. If you guess \na color that exists but is in the wrong slot, you get a \u201cpseudo-hit\u201d. For example, the \nguess YRGB has 2 hits and one pseudo hit.\nFor each guess, you are told the number of hits and pseudo-hits. \nWrite a method that, given a guess and a solution, returns the number of hits and \npseudo hits.", "id": 280}, {"answer": "This is not an especially challenging problem, but it is a long and tedious one. 
Your inter-viewer is unlikely to ask to see every detail, but he / she will be interested in how you ap-proach the problem.\n1 public static String numtostring(int num) {\n2 StringBuilder sb = new StringBuilder();\n3 \n4 // Count number of digits in num.\n5 int len = 1;\n6 while (Math.pow((double)10, (double)len ) < num) {\n7 len++;\n8 }\n9 \n10 String[] wordarr1 = {\u201c\u201d,\u201dOne \u201d, \u201cTwo \u201d, \u201cThree \u201d, \u201cFour \u201d, \n11 \u201cFive \u201d, \u201cSix \u201d, \u201cSeven \u201d, \u201cEight \u201d,\u201dNine \u201d};\n12 String[] wordarr11 = {\u201c\u201d, \u201cEleven \u201d, \u201cTwelve \u201d, \u201cThirteen \u201d, \n13 \u201cFourteen \u201d, \u201cFifteen \u201d, \u201cSixteen \u201d, \n14 \u201cSeventeen \u201d, \u201cEighteen \u201d, \u201cNineteen \u201d};\n15 String[] wordarr10 = {\u201c\u201d,\u201dTen \u201d, \u201cTwenty \u201d, \u201cThirty \u201d, \u201cForty \u201d, \n16 \u201cFifty \u201d, \u201cSixty \u201d, \u201cSeventy \u201d, \u201cEighty \u201d, \n17 \u201cNinety \u201c};\n18 String[] wordarr100 = {\u201c\u201d, \u201cHundred \u201d, \u201cThousand \u201d};\n19 int tmp;\n20 if (num == 0) {\n21 sb.append(\u201cZero\u201d);\n22 } else {\n23 if (len > 3 && len % 2 == 0) {\n24 len++;\n25 }\n26 do {\n27 // Number greater than 999\n28 if (len > 3) {\n29 tmp = (num / (int)Math.pow((double)10,(double)len-2));\n30 // If tmp is 2 digit number and not a multiple of 10\n31 if (tmp / 10 == 1 && tmp%10 != 0) {\n32 sb.append(wordarr11[tmp % 10]) ;\n33 } else {\n34 sb.append(wordarr10[tmp / 10]);\n35 sb.append(wordarr1[tmp % 10]);\n36 }\n\n37 if (tmp > 0) {\n38 sb.append(wordarr100[len / 2]);\n39 }\n40 num = num % (int)(Math.pow((double)10,(double)len-2));\n41 len = len-2;\n42 } else { // Number is less than 1000\n43 tmp = num / 100;\n44 if (tmp != 0) {\n45 sb.append(wordarr1[tmp]);\n46 sb.append(wordarr100[len / 2]);\n47 }\n48 tmp = num % 100 ;\n49 if(tmp / 10 == 1 && tmp % 10 != 0) {\n50 sb.append(wordarr11[tmp % 10]) 
;\n51 } else {\n52 sb.append(wordarr10[tmp / 10]);\n53 sb.append(wordarr1[tmp % 10]);\n54 }\n55 len = 0;\n56 }\n57 } while(len > 0);\n58 }\n59 return sb.toString();\n60 }", "question": "Given an integer between 0 and 999,999, print an English phrase that describes the \ninteger (eg, \u201cOne Thousand, Two Hundred and Thirty Four\u201d).", "id": 282}, {"answer": "A simple linear algorithm will work by keeping track of the current subsequence sum. If that \nsum ever drops below zero, that subsequence will not contribute to the subsequent maximal \nsubsequence since it \nwould reduce it by adding the negative sum.\n1 public static int getMaxSum(int[] a) {\n2 int maxsum = 0;\n3 int sum = 0;\n4 for (int i = 0; i < a.length; i++) {\n5 sum += a[i];\n6 if (maxsum < sum) {\n7 maxsum = sum;\n8 } else if (sum < 0) {\n9 sum = 0;\n10 }\n11 }\n12 return maxsum;\n13 }\nNOTE: If the array is all negative numbers, what is the correct behavior? Con-sider this simple array {-3, -10, -5}. You could make a good argument that the \nmaximum sum is either: (A) -3 (if you assume the subsequence can\u2019t be empty) \n(B) 0 (the subsequence has length 0) or (C) MINIMUM_INT (essentially the error \ncase). We went with option B (max sum = 0), but there\u2019s no \u201ccorrect\u201d answer. This \nis a great thing to discuss with your interviewer to show how careful you are.", "question": "You are given an \narray of integers (both positive and negative). Find the continuous \nsequence with the largest \nsum. Return the sum.\n \nEXAMPLE\nInput: {2, -8, 3, -2, 4, -10}\nOutput: 5 (i.e., {3, -2, 4} )", "id": 284}, {"answer": "The first question \u2013 which you should ask your interviewer \u2013 is if you\u2019re just asking for a \nsingle word (\u201csingle query\u201d) or if you might, eventually, use the same method for many dif-ferent words (\u201crepetitive queries\u201d)? 
That is, are you simply asking for the frequency of \u201cdog\u201d, \nor might you ask for \u201cdog,\u201d and then \u201ccat,\u201d \u201cmouse,\u201d etc?\nSolution: Single Query\nIn this case, we simply go through the book, word by word, and count the number of times \nthat a word appears. This will take O(n) time. We know we can\u2019t do better than that, as we \nmust look at every word in the book.\nSolution: Repetitive Queries\nIn this case, we create a hash table which maps from a word to a frequency. Our \ncode is then \nlike this:\n1 Hashtable<String, Integer> setupDictionary(String[] book) {\n2 Hashtable<String, Integer> table = \n3 new Hashtable<String, Integer>();\n4 for (String word : book) {\n5 word = word.toLowerCase();\n6 if (word.trim() != \u201c\u201d) {\n7 if (!table.containsKey(word)) table.put(word, 0);\n8 table.put(word, table.get(word) + 1);\n9 }\n10 }\n11 return table;\n12 }\n13 \n14 int getFrequency(Hashtable<String, Integer> table, String word) {\n15 if (table == null \n16 word = word.toLowerCase();\n17 if (table.containsKey(word)) {\n18 return table.get(word);\n19 }\n20 return 0;\n21 }\nNote: a problem like this is relatively easy. Thus, the interviewer is going to be \nlooking heavily at how careful you are. Did you check for error conditions?", "question": "Design a method to find the frequency of occurrences of any given word in a book.", "id": 286}, {"answer": "Part 1: Solution\nThis solution \ntokenizes the input and then encodes the items, element by element.\nNOTE: See code attachment for full, executable code. 
We have included an ab-breviated section here.\n1 private Map<String, Byte> tagMap;\n2 \n3 private List<String> tokens;\n4 private int currentTokenIndex;\n5 \n6 byte[] encode(char[] input) throws IOException {\n7 tokenize(input);\n8 currentTokenIndex = 0;\n9 ByteArrayOutputStream outputStream = new ByteArrayOutputStream();\n10 encodeTokens(outputStream);\n11 return outputStream.toByteArray();\n12 }\n13 \n14 void encodeTokens(ByteArrayOutputStream output) {\n15 nextToken(\u201c<\u201d);\n16 \n17 // read tag name\n18 String tagName = nextToken();\n19 output.write(getTagCode(tagName));\n20 \n21 // read attributes\n\n22 while (!hasNextToken(\u201c>\u201d) && !hasNextTokens(\u201c/\u201d, \u201c>\u201d)) {\n23 // read next attribute\n24 String key = nextToken();\n25 nextToken(\u201c=\u201d);\n26 String value = nextToken();\n27 output.write(getTagCode(key));\n28 for (char c : value.toCharArray()) {\n29 output.write(c);\n30 }\n31 output.write(END[0]);\n32 output.write(END[1]);\n33 }\n34 // end of attributes\n35 output.write(END[0]);\n36 output.write(END[1]);\n37 \n\n38 if (hasNextTokens(\u201c/\u201d, \u201c>\u201d)) {\n39 nextToken(\u201c/\u201d);\n40 nextToken(\u201c>\u201d);\n41 } else {\n42 nextToken(\u201c>\u201d);\n43 // while not the end tag\n44 while (!hasNextTokens(\u201c<\u201d, \u201c/\u201d)) {\n45 encodeTokens(output); // encode child\n46 }\n47 // ending tag\n48 nextToken(\u201c<\u201d);\n49 nextToken(\u201c/\u201d);\n50 nextToken(tagName);\n51 nextToken(\u201c>\u201d);\n52 }\n53 output.write(END[0]);\n54 output.write(END[1]);\n55 }\nPart 2: Is there anything you can do to compress this further?\nYou can treat the file as a general stream of characters and use any number of compression \ntechniques: Shannon\u2013Fano coding, Huffman coding or Arithmetic coding.", "question": "Since XML is very verbose, you are given a way of encoding it where each tag gets \nmapped to a pre-defined integer value. 
The language/grammar is as follows: Element --> Element Attr* END Element END [aka, encode the element tag, then its attributes, then tack on an END character, then encode its children, then another end tag] Attr --> Tag Value [assume all values are strings] END --> 01\n Value --> string value END\nWrite code to print the encoded version of an xml element (passed in as string).\nFOLLOW UP\nIs there anything else you could do to (in many cases) compress this even further?", "id": 288}, {"answer": "First, observe that we cannot do this in a guaranteed finite amount of time. Why? Let\u2019s see by \na parallel example: How would you use rand2() to create rand3()?\nObserve that each call of rand2() and the corresponding decision you make can be repre-sented by a decision tree. On each node, you have two branches. You take the left one when \nrand2() equals 0 (which happens with 1/2 probability). You take the right one when rand2() \nequals 1 (which happens with 1/2 probability). You continue branching left and right as you \ncontinue to call 1/2. When you reach a leaf, you return a result of 1, 2 or 3 (your rand3() re-sults). \n \n\u00bb What\u2019s the probability of taking each branch? 1/2. \n \n\u00bb What\u2019s the probability to reach a particular leaf node? 1/2^j (for some j). \n \n\u00bb What the probability of returning 3 (for example)? We could compute this by summing \nup the probabilities of reaching each leaf node with value 3. Each of these paths has \nprobability 1/2^j, so we know that the total probability of returning 3 must be a series \nof terms of reciprocal powers of 2 (e.g., 1/2^x + 1/2^y + 1/2^z + \u2013). \nWe also know, however, that the probability of returning 3 must be 1/3 (because rand3() \nshould be perfectly random). Can you find a series of reciprocal powers of 2 that sum to 1/3? 
\nNo, because 3 and 2 are relatively prime.\nWe can similarly conclude that to solve this problem, we will need to accept a small (infini-tesimally small) chance that this process will repeat forever. That\u2019s ok.\nSo, how do we solve this?\nIn order to generate a random number between 1 and 7, we just need to uniformly generate \na larger range than we are looking for and then repeatedly sample until we get a number that \nis good for us. We will generate a base 5 number with two places with two calls to the RNG.public static int rand7() { while (true) { int num = 5 * (rand5() - 1) + (rand5() - 1); if (num < 21) return (num % 7 + 1); }}", "question": "Write a method to generate a \nrandom number between 1 and 7, given a method \nthat generates a random number between 1 and 5 (i.e., implement rand7() using \nrand5()).", "id": 290}, {"answer": "One easy and (time) e\u02ddcient solution involves a hash map from integers to integers. This al-gorithm works by iterating through the array. On each element x, look up sum - x in the hash \ntable and, if it exists, print (x, sum - x). Add x to the hash table, and go to the next element.\nDefinition of Complement:\n If we\u2019re trying to find a pair of numbers that sums to z, the comple-ment of x will be z - x (that is, the number that can be added to x to make z). For example, \nif we\u2019re trying to find a pair of numbers that sum to 12, the complement of \u20135 would be 17.\nThe Algorithm:\n Imagine we have the following sorted array: {-2 -1 0 3 5 6 7 9 13 14 }. Let \nfirst\n \npoint to the head of the array and \nlast\n point to the end of the array. To find the complement \nof \nfirst\n, we just move \nlast\n backwards until we find it. If \nfirst + last < sum\n, then there is no com-plement for \nfirst\n. We can therefore move \nfirst\n forward. We stop when \nfirst\n is greater than \nlast\n.\nWhy must this find all complements for \nfirst\n? 
Because the array is sorted and we\u2019re trying \nprogressively smaller numbers. When the sum of \nfirst\n and \nlast\n is less than the sum, we know \nthat trying even smaller numbers (as \nlast\n) won\u2019t help us find a complement.\nWhy must this find all complements for \nlast\n? Because all pairs must be made up of a \nfirst\n and \na last. We\u2019ve found all complements for \nfirst\n, therefore we\u2019ve found all complements of \nlast\n.\n1 public static void printPairSums(int[] array, int sum) {\n2 Arrays.sort(array);\n3 \n\n4 int last = array.length - 1;\n5 \n\n6 \n7 if (s == sum) {\n8 \n9 \n10 --last;\n11 } else {\n12 \n13 else --last;\n14 }\n15 }\n16 }", "question": "Design an algorithm to find all pairs of integers within an \narray which \nsum to a speci-fied value.", "id": 292}, {"answer": "To investigate this problem, let\u2019s start off by gaining a deeper understanding of how we add \nnumbers. We\u2019ll work in Base 10 so that it\u2019s easier to see. To add 759 + 674, I would usually add \ndigit[0] from each number, carry the one, add digit[1] from each number, carry the one, etc. \nYou could take the same approach in binary: add each digit, and carry the one as necessary.\nCan we make this a little easier? Yes! Imagine I decided to split apart the \u201caddition\u201d and \n\u201ccarry\u201d steps. That is, I do the following:\n1. Add 759 + 674, but \u201cforget\u201d to carry. I then get 323.\n2. Add 759 + 674 but only do the carrying, rather than the addition of each digit. I then \nget 1110.\n3. Add the result of the first two operations (recursively, using the same process de-scribed in step 1 and 2): 1110 + 323 = 1433.\nNow, how would we do this in binary?\n1. If I add two binary numbers together but forget to carry, bit[i] will be 0 if bit[i] in a and \nb are both 0 or both 1. This is an XOR.\n2. If I add two numbers together but only carry, I will have a 1 in bit[i] if bit[i-1] in a and b \nare both 1\u2019s. 
This is an AND, shifted.\n3. Now, recurse until there\u2019s \nnothing to carry.\n1 int add_no_arithm(int a, int b) {\n2 if (b == 0) return a;\n3 int sum = a ^ b; // add without carrying\n4 int carry = (a & b) << 1; // carry, but don\u2019t add\n5 return add_no_arithm(sum, carry); // recurse\n6 }\nThe Approach: There are a couple of suggestions for figuring out this problem:\n1. Our first instinct in problems like these should be that we\u2019re going to have to work \nwith bits. Why? Because when you take away the + sign, what other choice do we \nhave? Plus, that\u2019s how computers do it.\n\n2. Our next thought in problems like these should be to really, really understand how \nyou add. Walk through an addition problem to see if you can understand something \nnew\u2014some pattern\u2014and then see if you can replicate that with code.\nYour interviewer is looking for two things in this problem:\n1. Can you break down a problem and solve it?\n2. Do you understand how to work with bits?", "question": "Write a function that \nadds two numbers. You should not use + or any arithmetic op-erators.", "id": 294}, {"answer": "This is a very well known interview question, and a well known algorithm. If you aren\u2019t one \nof the lucky few to have already know this algorithm, read on.\nLet\u2019s start with a brute force approach: we could randomly selecting items and put them into \na new array. We must make sure that we don\u2019t pick the same item twice though by somehow \nmarking the node as dead.Array: [1] [2] [3] [4] [5]Randomly select 4: [4] [?] [?] [?] [?]Mark element as dead: [1] [2] [3] [X] [5]\nThe tricky part is, how do we mark [4] as dead such that we prevent that element from be-ing picked again? One way to do it is to swap the now-dead [4] with the first element in the \narray:Array: [1] [2] [3] [4] [5]Randomly select 4: [4] [?] [?] [?] [?]Swap dead element: [X] [2] [3] [1] [5]Array: [X] [2] [3] [1] [5]Randomly select 3: [4] [3] [?] [?] 
[?]Swap dead element: [X] [X] [2] [1] [5]\nBy doing it this way, it\u2019s much easier for the algorithm to \u201cknow\u201d that the first k elements are \ndead than that the third, fourth, nineth, etc elements are dead. We can also optimize this by \nmerging the shunled array and the original array.Randomly select 4: [4] [2] [3] [1] [5]Randomly select 3: [4] [3] [2] [1] [5]\nThis is an easy algorithm to implement iteratively:\n1 int[] cards) { \n2 int temp, index; \n3 for (int i = 0; i < cards.length; i++){ \n4 index = (int) (Math.random() * (cards.length - i)) + i; \n5 temp = cards[i]; \n6 cards[i] = cards[index];\n7 cards[index] = temp; \n8 } \n9 }", "question": "Write a method to shunle a deck of cards. It must be a perfect shunle - in other words, \neach 52! permutations of the deck has to be equally likely. Assume that you are given \na random number generator which is perfect.", "id": 296}, {"answer": "Our first instinct on this problem might be to \nrandomly pick elements from the array and put \nthem into our new subset array. But then, what if we pick the same element twice? Ideally, \nwe\u2019d want to somehow \u201cshrink\u201d the array to no longer contain that element. Shrinking is \nexpensive though because of all the shifting required. \nInstead of shrinking / shifting, we can swap the element with an element at the beginning \nof the array and then \u201cremember\u201d that the array now only includes elements j and greater. \nThat is, when we pick subset[0] to be array[k], we replace array[k] with the first element in \nthe array. When we pick subset[1], we consider array[0] to be \u201cdead\u201d and we pick a random \nelement y between 1 and array.size(). We then set subset[1] equal to array[y], and set array[y] \nequal to array[1]. 
Elements 0 and 1 are now \u201cdead.\u201d Subset[2] is now chosen from array[2] \nthrough array[array.size()], and so on.\n1 /* Random number between lower and higher, inclusive */\n2 public static int rand(int lower, int higher) { \n3 return lower + (int)(Math.random() * (higher - lower + 1));\n4 }\n5 \n6 /* pick M elements from original array. Clone original array so that\n7 * we don\u2019t destroy the input. */\n8 public static int[] pickMRandomly(int[] original, int m) {\n9 int[] subset = new int[m];\n10 int[] array = original.clone();\n11 for (int j = 0; j < m; j++) {\n12 int index = rand(j, array.length - 1); \n13 subset[j] = array[index];\n14 array[index] = array[j]; // array[j] is now \u201cdead\u201d\n15 }\n16 return subset;\n17 }", "question": "Write a method to randomly generate a set of m integers from an array of size n. Each \nelement must have equal probability of being chosen.", "id": 298}, {"answer": "Picture a sequence of numbers: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29...110 111 112 113 114 115 116 117 118 119\nThe last digit will be repeated every 10 numbers, the last two digits will be repeated every \n10^2 numbers, the last 3 digits will be repeated every 10^3 numbers, etc.\nSo, if there are X 2s between 0 and 99, then we know there are 2x twos between 0 and 199. 
\nBetween 0 and 299, we have 3x twos from the last two digits, and another 100 2s from the \nfirst digit.\nIn other words, we can look at a number like this:f(513) = 5 * f(99) + f(13) + 100\nTo break this down individually:\n \n\u00bb The sequence of the last two digits are repeated 5 times, so add 5 * f(99)\n \n\u00bb We need to account for the last two digits in 500 -> 513, so add f(13)\n \n\u00bb We need to account for the first digit being two between 200 -> 299, so add 100\nOf course, if n is, say, 279, we\u2019ll need to account for this slightly differently: f(279) = 2 * f(99) + f(79) + 79 + 1\nTo break this down individually:\n \n\u00bb The sequence of the last two digits are repeated 2 times, so add 2 * f(99)\n \n\u00bb We need to account for the last two digits in 200 -> 279, so add f(79)\n \n\u00bb We need to account for the first digit being two between 200 -> 279, so add 79 + 1\nRecu rsive Code:\n1 public static int count2sR(int n) { \n2 // Base case\n3 if (n == 0) return 0;\n4 \n5 // 513 into 5 * 100 + 13. [Power = 100; First = 5; Remainder = 13]\n6 int power = 1;\n7 while (10 * power < n) power *= 10;\n8 \n\n9 int remainder = n % power;\n\n10 \n11 \n\n12 int nTwosFirst = 0;\n13 \n\n14 \n\n15 \n16 // Count 2s from all other digits\n17 \n\n18 \n19 return nTwosFirst + nTwosOther;\n20 }\nWe can also implement this algorithm iteratively:\n1 public static int count2sI(int num) {\n2 int countof2s = 0, digit = 0;\n3 int j = num, seendigits=0, position=0, pow10_pos = 1;\n4 /* maintaining this value instead of calling pow() is an 6x perf \n5 * gain (48s -> 8s) pow10_posMinus1. maintaining this value\n6 * instead of calling Numof2s is an 2x perf gain (8s -> 4s). \n7 * overall > 10x speedup */\n8 while (j > 0) {\n9 digit = j % 10;\n10 int pow10_posMinus1 = pow10_pos / 10;\n11 countof2s += digit * position * pow10_posMinus1;\n12 /* we do this if digit <, >, or = 2\n13 * Digit < 2 implies there are no 2s contributed by this \n14 * digit. 
\n15 * Digit == 2 implies there are 2 * numof2s contributed by\n16 * the previous position + num of 2s contributed by the \n17 * presence of this 2 */\n18 if (digit == 2) {\n19 countof2s += seendigits + 1;\n20 }\n21 /* Digit > 2 implies there are digit * num of 2s by the prev. \n22 * position + 10^position */\n23 else if(digit > 2) {\n24 countof2s += pow10_pos;\n25 }\n26 seendigits = seendigits + pow10_pos * digit;\n27 pow10_pos *= 10;\n28 position++; \n29 j = j / 10;\n30 }\n31 return(countof2s);\n32 }", "question": "Write a method to count the number of 2s between 0 and n.", "id": 300}, {"answer": "We will assume for this question that the word order does not matter. This is a question you \nshould ask your interviewer. If the word order does matter, we can \nmake the small modifica-tion shown in the code below.\nTo solve this problem, simply traverse the file and for every occurrence of word1 and word2, \ncompare difference of positions and update the current minimum.\n1 int shortest(String[] words, String word1, String word2) {\n2 int pos = 0;\n3 int min = Integer.MAX_VALUE / 2;\n4 int word1_pos = -min;\n5 int word2_pos = -min;\n6 for (int i = 0; i < words.length; i++) {\n7 String current_word = words[i];\n8 if (current_word.equals(word1)) {\n9 word1_pos = pos;\n10 // Comment following 3 lines if word order matters\n11 int distance = word1_pos - word2_pos;\n12 if (min > distance) \n13 min = distance;\n14 } else if (current_word.equals(word2)) {\n15 word2_pos = pos;\n16 int distance = word2_pos - word1_pos;\n17 if (min > distance) min = distance;\n18 }\n19 ++pos;\n20 }\n21 return min;\n22 }\nTo solve this problem in less time (but more space), we can create a \nhash table with each \nword and the locations where it occurs. 
We then just need to find the minimum (arithmetic) \ndifference in the locations (e.g., abs(word0.loc[1] - word1.loc[5])).\nTo find the minimum arithmetic difference, we take each location for word1 (e.g.: 0, 3} and \ndo a modified binary search for it in word2\u2019s location list, returning the closest number. Our \nsearch for 3, for example, in {2, 7, 9} would return 1. The minimum of all these binary searches \nis the shortest distance.", "question": "You have a large text file containing words. Given any two words, find the shortest \ndistance (in terms of number of words) between them in the file. Can you make the \nsearching operation in O(1) time? What about the space complexity for your solu-tion?", "id": 302}, {"answer": "Approach 1: \nSorting\nSort the elements and then take the first million numbers from that. Complexity is O(n log n).\nApproach 2: Max Heap\n1. Create a Min Heap with the first million numbers.\n2. For each remaining number, insert it in the Min Heap and then delete the minimum \nvalue from the heap.\n3. The heap now contains the largest million numbers.\n4. This algorithm is \nO(n log m), where m is the number of values we are looking for.\nApproach 3: Selection Rank Algorithm (if you can modify the original \narray)\nSelection Rank is a well known algorithm in computer science to find the ith smallest (or \nlargest) element in an array in expected linear time. The basic algorithm for finding the ith \nsmallest elements goes like this:\n \n\u00bb Pick a random element in the array and use it as a \u201apivot\u2019. Move all elements smaller than \nthat element to one side of the array, and all elements larger to the other side.\n \n\u00bb If there are exactly i elements on the right, then you just find the smallest element on \nthat side.\n \n\u00bb Otherwise, if the right side is bigger than i, repeat the algorithm on the right. 
If the right \nside is smaller than i, repeat the algorithm on the left for i \u2013 right.size().\nGiven this algorithm, you can either:\n \n\u00bb Tweak it to use the existing partitions to find the largest i elements (where i = one mil-lion).\n \n\u00bb Or, once you find the ith largest element, run through the array again to return all ele-ments greater than or equal to it.\nThis algorithm has expected O(n) time.", "question": "Describe an algorithm to find the largest 1 million numbers in 1 billion numbers. As-sume that the computer memory can hold all one billion numbers.", "id": 304}, {"answer": "The solution below does the following:\n1. Sort the array by size, putting the longest word at the front\n2. For each word, split it in all possible ways. That is, for \u201ctest\u201d, split it into {\u201ct\u201d, \u201cest\u201d}, {\u201cte\u201d, \n\u201cst\u201d} and {\u201ctes\u201d, \u201ct\u201d}.\n3. Then, for each pairing, check \nif the first half and the second both exist elsewhere in the \narray.\n4. \u201cShort circuit\u201d by returning the first string we find that fits condition #3.\nWhat is the time complexity of this?\n \n\u00bb Time to sort array: O\n(n log n)\n \n\u00bb Time to check if first / second half of word exists: O(d) per word, where d is the average \nlength of a word.\n \n\u00bb Total complexity: O(n log n + n * d). Note that d is fixed (probably around 5\u201410 charac-ters). Thus, we can guess that for short arrays, the time is estimated by O(n * d) , which \nalso equals O(number of characters in the array). For longer arrays, the time will be bet-ter estimated by O(n log n).\n \n\u00bb Space complexity: O(n).\nOptimizations: If we didn\u2019t want to use additional space, we could cut out the hash\n table. 
This \nwould mean:\n \n\u00bb Sorting the array in alphabetical order\n \n\u00bb Rather than looking up the word in a hash table, we would use binary search in the array\n \n\u00bb We would no longer be able to short circuit.\n1 class LengthComparator implements Comparator<String> {\n2 @Override\n3 public int compare(String o1, String o2) {\n4 if (o1.length() < o2.length()) return 1;\n5 if (o1.length() > o2.length()) return -1;\n6 return 0;\n7 }\n8 }", "question": "Write a program to find the longest word made of other words.", "id": 306}, {"answer": "First, create a su\u02ddx tree for s. For example, if your word were bibs, you would create the fol-lowing tree:\nThen, all you need to do is search for each string in T in the su\u02ddx tree. Note that if \u201cB\u201d were a \nword, you would come up with two locations.\n1 public class \n2 \n\n3 \n\n4 for (int i = 0; i < s.length(); i++) {\n5", "question": "Given a string s and an array of smaller strings T, design a method to search s for each \nsmall string in T.", "id": 308}, {"answer": "7 }\n8 }\n9 \n10 public ArrayList<Integer> getIndexes(String s) {\n11 return root.getIndexes(s);\n12 }\n13 }\n14 \n15 \n16 \n17 \n18 char value;\n19 ArrayList<Integer> indexes = new ArrayList<Integer>();SBIBSSBIS\n\n20 \n\n21 \n22 public void insertString(String s, int index) {\n23 indexes.add(index);\n24 if (s != null && s.length() > 0) {\n25 value = s.charAt(0);\n26 \n27 if (children.containsKey(value)) {\n28 child = children.get(value);\n29 } else {\n30 \n31 children.put(value, child);\n32 }\n33 String remainder = s.substring(1);\n34 child.insertString(remainder, index);\n35 }\n36 }\n37 \n38 public ArrayList<Integer> getIndexes(String s) {\n39 if (s == null \n40 return indexes;\n41 } else {\n42 \n43 \n44 String remainder = s.substring(1);\n45 \n46 }\n47 }\n48 return null;\n49 }\n50 }\n51 \n52 public class Question {\n53 public static void main(String[] args) {\n54 String testString = \u201cmississippi\u201d;\n55 String[] 
stringList = {\u201cis\u201d, \u201csip\u201d, \u201chi\u201d, \u201csis\u201d};\n56 \n57 for (String s : stringList) {\n58 ArrayList<Integer> list = tree.getIndexes(s);\n59 if (list != null) {\n60 System.out.println(s + \u201c: \u201c + list.toString());\n61 }\n62 }\n63 }\n64 }", "question": "6", "id": 310}, {"answer": "One solution is to \nuse two priority heaps: a max heap for the values below the median, and \na min heap for the values above the median. The median will be largest value of the max \nheap. When a new value arrives it is placed in the below heap if the value is less than or equal \nto the median, otherwise it is placed into the above heap. The heap sizes can be equal or \nthe below heap has one extra. This constraint can easily be restored by shifting an element \nfrom one heap to the other. The \nmedian is available in constant time, so updates are O(lg n).\n1 private Comparator<Integer> maxHeapComparator, minHeapComparator;\n2 private PriorityQueue<Integer> maxHeap, minHeap;\n3 public void addNewNumber(int randomNumber) {\n4 if (maxHeap.size() == minHeap.size()) {\n5 if ((minHeap.peek() != null) && \n6 randomNumber > minHeap.peek()) {\n7 maxHeap.offer(minHeap.poll());\n8 minHeap.offer(randomNumber);\n9 } else {\n10 maxHeap.offer(randomNumber);\n11 }\n12 }\n13 else {\n14 if(randomNumber < maxHeap.peek()){\n15 minHeap.offer(maxHeap.poll());\n16 maxHeap.offer(randomNumber);\n17 }\n18 else {\n19 minHeap.offer(randomNumber);\n20 }\n21 }\n22 }\n23 public static double getMedian() {\n24 if (maxHeap.isEmpty()) return minHeap.peek();\n25 else if (minHeap.isEmpty()) return maxHeap.peek();\n26 if (maxHeap.size() == minHeap.size()) {\n27 return (minHeap.peek() + maxHeap.peek()) / 2;\n28 } else if (maxHeap.size() > minHeap.size()) {\n29 return maxHeap.peek();\n30 } else {\n31 return minHeap.peek();\n32 }\n33 }", "question": "Numbers are randomly generated and passed to a method. 
Write a program to find \nand maintain the median value as new values are generated.", "id": 312}, {"answer": "Though this problem seems tough, it\u2019s actually a straightforward modification of breadth-first-search. Each word in our \u201cgraph\u201d branches to all words in the \ndictionary that are one edit \naway. The interesting part is how to implement this\u2014should we build a graph as we go? We \ncould, but there\u2019s an easier way. We can instead use a \u201cbacktrack map.\u201d In this backtrack map, \nif B[v] = w, then you know that you edited v to get w. When we reach our end word, we can \nuse this backtrack map repeatedly to reverse our path. See the code below:\n1 LinkedList<String> transform(String startWord, String stopWord, \n2 Set<String> dictionary) {\n3 startWord = startWord.toUpperCase();\n4 stopWord = stopWord.toUpperCase();\n5 Queue<String> actionQueue = new LinkedList<String>();\n6 Set<String> visitedSet = new HashSet<String>();\n7 Map<String, String> backtrackMap = new TreeMap<String, String>();\n8 \n9 actionQueue.add(startWord);\n10 visitedSet.add(startWord);\n11 \n12 while (!actionQueue.isEmpty()) {\n13 String w = actionQueue.poll();\n14 // For each possible word v from w with one edit operation\n15 for (String v : getOneEditWords(w)) {\n16 if (v.equals(stopWord)) {\n17 // Found our word! 
Now, back track.\n18 LinkedList<String> list = new LinkedList<String>();\n19 // Append v to list\n20 list.add(v);\n21 while (w != null) {\n22 list.add(0, w);\n23 w = backtrackMap.get(w);\n24 }\n25 return list;\n26 }\n27 // If v is a dictionary word\n\n28 if (dictionary.contains(v)) {\n29 if (!visitedSet.contains(v)) {\n30 actionQueue.add(v);\n31 visitedSet.add(v); // mark visited\n32 backtrackMap.put(v, w);\n33 }\n34 }\n35 }\n36 }\n37 return null;\n38 }\n39 \n40 Set<String> getOneEditWords(String word) {\n41 Set<String> words = new TreeSet<String>();\n42 for (int i = 0; i < word.length(); i++) {\n43 char[] wordArray = word.toCharArray();\n44 // change that letter to something else\n45 for (char c = \u201aA\u2019; c <= \u201aZ\u2019; c++) {\n46 if (c != word.charAt(i)) {\n47 wordArray[i] = c;\n48 words.add(new String(wordArray));\n49 }\n50 }\n51 }\n52 return words;\n53 }\nLet n be the length of the start word and m be the number of like sized words in the diction-ary. The runtime of this algorithm is O(n*m) since the while loop will dequeue at most m \nunique words. The for loop is O(n) as it walks down the string applying a fixed number of \nreplacements for each character.", "question": "Given two words of equal length that are in a dictionary, write a method to transform \none word into another word by changing only one letter at a time. The new word you \nget in each step must be in the dictionary. \nEXAMPLE:\nInput: DAMP, LIKE\nOutput: DAMP -> LAMP -> LIMP -> LIME -> LIKE", "id": 314}, {"answer": "Assumption:\n Square is of size NxN.\nThis algorithm does the following:\n1. Iterate through every (full) \ncolumn from left to right.\n2. At each (full) column (call \nthis currentColumn), look at the subcolumns (from biggest \nto smallest).\n3. At each subcolumn,\n see if you can form a square with the subcolumn as the left side. If \nso, update currentMaxSize and go to the next (full) column.\n4. If N - currentColumn <= currentMaxSize, then break completely. 
We\u2019ve found the \nlargest square possible. \nWhy?\n At \neach column, we\u2019re trying to create a square with that \ncolumn as the left side. The largest such square we could possibly create is N - \ncurrentCol-umn\n. Thus, if N-currentColumn\n <= currentMaxSize, then we have no need to proceed.\nTime complexity: O(N\n^2).\n1 \n2 assert(matrix.length > 0);\n3 for (int row = 0; row < matrix.length; row++){\n4 assert(matrix[row].length == matrix.length);\n5 }\n6 \n7 int N = matrix.length;\n8 \n9 int currentMaxSize = 0;\n10 Subsquare sq = null;\n11 int col = 0;\n12 \n13 // Iterate through each column from left to right\n14 while (N - col > currentMaxSize) { // See step 4 above \n15 for (int row = 0; row < matrix.length; row++){\n16 // starting from the biggest\n17 int size = N - Math.max(row, col);\n18 while (size > currentMaxSize){\n19 if (isSquare(matrix, row, col, size)){\n20 currentMaxSize = size;\n21 sq = new Subsquare(row, col, size);\n22 break; // go to next (full) column \n\n23 }\n24 size--;\n25 }\n26 }\n27 col++;\n28 }\n29 return sq;\n30 }\n31 \n32 private static boolean isSquare(int[][] matrix, int row, int col, \n33 int size) {\n34 // Check top and bottom border.\n35 for (int j = 0; j < size; j++){\n36 if (matrix[row][col+j] == 1) {\n37 return false;\n38 }\n39 if (matrix[row+size-1][col+j] == 1){\n40 return false;\n41 }\n42 }\n43 \n44 // Check left and right border.\n45 for (int i = 1; i < size - 1; i++){\n46 if (matrix[row+i][col] == 1){\n47 return false;\n48 }\n49 if (matrix[row+i][col+size-1] == 1){\n50 return false;\n51 }\n52 }\n53 return true;\n54 }\n55 \n56 public class Subsquare {\n57 public int row, column, size;\n58 public Subsquare(int r, int c, int sz) {\n59 row = r;\n60 column = c;\n61 size = sz;\n62 }\n63 }", "question": "Imagine you have a square matrix, where each cell is filled with either black or white. 
\nDesign an algorithm to find the maximum subsquare such that all four borders are \nfilled with black pixels.", "id": 316}, {"answer": "Like many \u201cmaximizing\u201d problems, this problem has a straight forward brute force solution. \nThe brute force solution simply iterates through all possible sub-matrixes, computes the \nsum, and finds the biggest.\nTo iterate through all possible sub-matrixes (with no duplicates), we simply need to iterate \nthrough all order pairings of rows, and then all ordered pairings of columns. \nThis solution is O(N^6), since we iterate through\n O\n(N^4) sub-matrixes, and it takes O(N^2) \ntime to compute the area of each.", "question": "Given an NxN matrix of positive and negative integers, write code to find the sub-matrix with the largest possible sum.", "id": 318}, {"answer": "Given a dictionary of millions of words, give an algorithm to find the largest possible \nrectangle of letters such that every row forms a word (reading left to right) and every \ncolumn forms a word (reading top to bottom).", "question": "Notice that the earlier solution is made slower by a factor of O(N^2) simply because comput-ing the sum of a matrix is so slow. Can we reduce the \ntime to compute the area? Yes! In fact, \nwe can reduce the time of computeSum to O(1).\nConsider the following:\nIf we had the sum of the smaller rectangle (the one including A, B, C, D), and we could com-pute the sum of D as follows: area(D) = area(A through D) - area(A) - area(B) - area(C).\nWhat if, instead, we had the following:\nwith the following values (notice that each Val_* starts at the origin):\nx1\nx2\ny1\ny2ABCDABCD\n\nWith these values, we know the following:area(D) = Val_D - area(A union C) - area(A union B) + area(A).\nOr, written another way:area(D) = Val_D - Val_B - Val_C + Val_A\nCan we e\u02ddciently compute these Val_* values for all points in the matrix? 
Yes, by using simi-lar logic: Val_(x, y) = Val(x - 1, y) + Val(y - 1, x) - Val(x - 1, y - 1)\nWe can precompute all such values, and then e\u02ddciently find the maximum submatrix. See \nthe following code for this implementation\n1 public static int getMaxMatrix(int[][] original) {\n2 int maxArea = Integer.MIN_VALUE; // Important! Max could be < 0\n3 int rowCount = original.length;\n4 int columnCount = original[0].length;\n5 int[][] matrix = precomputeMatrix(original);\n6 for (int row1 = 0; row1 < rowCount; row1++) {\n7 for (int row2 = row1; row2 < rowCount; row2++) {\n8 for (int col1 = 0; col1 < columnCount; col1++) {\n9 for (int col2 = col1; col2 < columnCount; col2++) {\n10 maxArea = Math.max(maxArea, computeSum(matrix,\n11 row1, row2, col1, col2));\n12 }\n13 }\n14 }\n15 }\n16 return maxArea;\n17 }\n18 \n19 private static int[][] precomputeMatrix(int[][] matrix) {\n20 int[][] sumMatrix = new int[matrix.length][matrix[0].length];\n21 for (int i = 0; i < matrix.length; i++) {\n22 for (int j = 0; j < matrix.length; j++) {\n23 \n24 sumMatrix[i][j] = matrix[i][j];\n25 \n26 sumMatrix[i][j] = sumMatrix[i - 1][j] + matrix[i][j];\n27 \n28 sumMatrix[i][j] = sumMatrix[i][j - 1] + matrix[i][j];\n29 } else {\n30 sumMatrix[i][j] = sumMatrix[i - 1][j] + \n31 sumMatrix[i][j - 1] - sumMatrix[i - 1][j - 1] +\n\n32 matrix[i][j];\n33 }\n34 }\n35 }\n36 return sumMatrix;\n37 }\n38 \n39 private static int computeSum(int[][] sumMatrix, int i1, int i2, \n40 int j1, int j2) {\n41 if (i1 == 0 && j1 == 0) { // starts at row 0, column 0\n42 return sumMatrix[i2][j2];\n43 } else if (i1 == 0) { // start at row 0\n44 return sumMatrix[i2][j2] - sumMatrix[i2][j1 - 1];\n45 } else if (j1 == 0) { // start at column 0\n46 return sumMatrix[i2][j2] - sumMatrix[i1 - 1][j2];\n47 } else {\n48 return sumMatrix[i2][j2] - sumMatrix[i2][j1 - 1] \n49 - sumMatrix[i1 - 1][j2] + sumMatrix[i1 - 1][j1 - 1];\n50 }\n51 }", "id": 320}]
};