% mem.tex
  1. % vim: set foldmethod=marker foldmarker=<<<,>>>:
  2. \section{Memory/bandwidth optimization}
  3. % 1) (malloc, first-touch, bandwidth, free) for (writing to array)
  4. % 2) (bandwidth) for (reading array) [reduction]
  5. % 3) (flop,bandwidth) for (vector copy, vector-add) (write causes read -- unless streaming write)
  6. % 4) (latency) for (sequential access, strided access) (integer array with indices)
  7. % x2 - single and multi threaded
  8. % plot: X (size), Y (cycles) ---- vary stride length
  9. % spatial and temporal data locality
  10. % hyper threading - shared cache - useful for latency bound
  11. \begin{frame} \frametitle{Memory}{} %<<<
  12. \begin{columns}
  13. \column{0.5\textwidth}
  14. How does computer memory work?
  15. \vspace{2em}
  16. References:
  17. {\small
  18. \begin{itemize}
  19. \setlength\itemsep{1em}
  20. \item Ulrich Drepper -- What every programmer should know about memory (2007)
  21. %https://lwn.net/Articles/252125/
  22. \item Igor Ostrovsky -- Gallery of Processor Cache Effects %\url{http://igoro.com/archive/gallery-of-processor-cache-effects}
  23. \end{itemize}
  24. }
  25. \column{0.5\textwidth}
  26. \center
  27. \includegraphics[width=0.99\textwidth]{figs/cache-hierarchy}
  28. {\footnotesize Source: Intel Software Developer Manual}
  29. \end{columns}
  30. \end{frame}
  31. %>>>
  32. \begin{frame}[t,fragile] \frametitle{Memory benchmarks}{} %<<<
  33. \begin{columns}
  34. \column{0.55\textwidth}
  35. \footnotesize
  36. \begin{overprint}
  37. \onslide<1->%<<<
  38. \begin{minted}[
  39. frame=lines,
  40. fontsize=\footnotesize,
  41. linenos,
  42. autogobble,
  43. mathescape
  44. ]{C++}
  45. long N = 1e9; // 8 GB
  46. // Allocate memory
  47. double* X = (double*)malloc(N*sizeof(double));
  48. // Initialize array
  49. for (long i = 0; i < N; i++) X[i] = i;
  50. // Write to array
  51. for (long i = 0; i < N; i++) X[i] = 2*i;
  52. // Free memory
  53. free(X);
  54. \end{minted}
  55. %>>>
  56. \end{overprint}
  57. \column{0.05\textwidth}
  58. \column{0.4\textwidth}
  59. \vspace{0.3em}
  60. \begin{overprint}
  61. \onslide<2->%<<<
  62. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  63. Allocate memory
  64. T = 1.60821e-05
  65. Initialize array
  66. T = 1.75352 --- 4.6 GB/s
  67. Write to array
  68. T = 0.84467 --- 9.5 GB/s
  69. Free memory
  70. T = 0.0141113
  71. \end{minted}
  72. %\textcolor{red}{\qquad only $1.5\times$ speedup :(}
  73. %>>>
  74. \end{overprint}
  75. \end{columns}
  76. \vspace{0.5em}
  77. \only<3->{
  78. \vspace{0.5em}
  79. \begin{columns}
  80. \column{0.6\textwidth}
  81. \textcolor{red}{Memory allocations are not free!}
  82. \begin{itemize}
  83. \item \textcolor{red}{cost is hidden in initialization (first-touch)}
  84. \end{itemize}
  85. \end{columns}
  86. }
  87. \end{frame}
  88. %>>>
  89. \begin{frame}[t,fragile] \frametitle{L1-cache bandwidth}{} %<<<
  90. \begin{columns}
  91. \column{0.55\textwidth}
  92. \footnotesize
  93. \begin{overprint}
  94. \onslide<1->%<<<
  95. \begin{minted}[
  96. frame=lines,
  97. fontsize=\footnotesize,
  98. linenos,
  99. autogobble,
  100. mathescape
  101. ]{C++}
  102. long N = 2048; // 16KB
  103. double* X = (double*)malloc(N*sizeof(double));
  104. double* Y = (double*)malloc(N*sizeof(double));
  105. // Initialize X, Y
  106. // Write to array
  107. for (long i = 0; i < N; i++) X[i] = 3.14;
  108. // Read from array
  109. double sum = 0;
  110. for (long i = 0; i < N; i++) sum += X[i];
  111. // Adding arrays: 2-reads, 1-write
  112. for (long i = 0; i < N; i++) Y[i] += X[i];
  113. \end{minted}
  114. %>>>
  115. \end{overprint}
  116. \column{0.05\textwidth}
  117. \column{0.4\textwidth}
  118. \vspace{0.5em}
  119. \begin{overprint}
  120. \onslide<2->%<<<
  121. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  122. Writing to array
  123. Bandwidth = 26.2744 GB/s
  124. Reading from array
  125. Bandwidth = 6.57305 GB/s
  126. Adding arrays
  127. Bandwidth = 131.203 GB/s
  128. \end{minted}
  129. %\textcolor{red}{\qquad only $1.5\times$ speedup :(}
  130. %>>>
  131. \end{overprint}
  132. \end{columns}
  133. \end{frame}
  134. %>>>
  135. \begin{frame}[t,fragile] \frametitle{L1-cache bandwidth (vectorized)}{} %<<<
  136. \begin{columns}
  137. \column{0.55\textwidth}
  138. \footnotesize
  139. \begin{overprint}
  140. \onslide<1-2>%<<<
  141. \begin{minted}[
  142. frame=lines,
  143. fontsize=\footnotesize,
  144. linenos,
  145. autogobble,
  146. mathescape
  147. ]{C++}
  148. using Vec = sctl::Vec<double,8>;
  149. long N = 2048; // 16KB
  150. double* X = (double*)malloc(N*sizeof(double));
  151. double* Y = (double*)malloc(N*sizeof(double));
  152. // Initialize X, Y
  153. // Write to array
  154. Vec v = 3.14;
  155. #pragma GCC unroll (4)
  156. for (long i = 0; i < N; i+=8) v.Store(X+i);
  157. \end{minted}
  158. %>>>
  159. \onslide<3-4>%<<<
  160. \begin{minted}[
  161. frame=lines,
  162. fontsize=\footnotesize,
  163. linenos,
  164. autogobble,
  165. mathescape
  166. ]{C++}
  167. // Read from array
  168. Vec sum[8] = {0.,0.,0.,0.,0.,0.,0.,0.};
  169. for (long i = 0; i < N; i+=8*8) {
  170. sum[0] = sum[0] + Vec::Load(X +i);
  171. sum[1] = sum[1] + Vec::Load(X+8 +i);
  172. sum[2] = sum[2] + Vec::Load(X+16+i);
  173. sum[3] = sum[3] + Vec::Load(X+24+i);
  174. sum[4] = sum[4] + Vec::Load(X+32+i);
  175. sum[5] = sum[5] + Vec::Load(X+40+i);
  176. sum[6] = sum[6] + Vec::Load(X+48+i);
  177. sum[7] = sum[7] + Vec::Load(X+56+i);
  178. }
  179. \end{minted}
  180. %>>>
  181. \onslide<5-6>%<<<
  182. \begin{minted}[
  183. frame=lines,
  184. fontsize=\footnotesize,
  185. linenos,
  186. autogobble,
  187. mathescape
  188. ]{C++}
  189. // Adding arrays: 2-reads, 1-write
  190. for (long i = 0; i < N; i+=8*2) {
  191. Vec X0 = Vec::Load(X+0+i);
  192. Vec X1 = Vec::Load(X+8+i);
  193. Vec Y0 = Vec::Load(Y+0+i);
  194. Vec Y1 = Vec::Load(Y+8+i);
  195. (X0+Y0).Store(Y+8*0+i);
  196. (X1+Y1).Store(Y+8*1+i);
  197. }
  198. \end{minted}
  199. %>>>
  200. \end{overprint}
  201. \column{0.05\textwidth}
  202. \column{0.4\textwidth}
  203. \vspace{0.5em}
  204. \begin{overprint}
  205. \onslide<2-3>%<<<
  206. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  207. Writing to array
  208. Bandwidth = 89.5993 GB/s
  209. cycles/iter = 2.35716
  210. \end{minted}
  211. %>>>
  212. \onslide<4-5>%<<<
  213. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  214. Writing to array
  215. Bandwidth = 89.5993 GB/s
  216. cycles/iter = 2.35716
  217. Reading from array
  218. Bandwidth = 210.375 GB/s
  219. cycles/iter = 1.00392
  220. \end{minted}
  221. %>>>
  222. \onslide<6->%<<<
  223. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  224. Writing to array
  225. Bandwidth = 89.5993 GB/s
  226. cycles/iter = 2.35716
  227. Reading from array
  228. Bandwidth = 210.375 GB/s
  229. cycles/iter = 1.00392
  230. Adding arrays
  231. Bandwidth = 148.29 GB/s
  232. cycles/iter = 4.27271
  233. \end{minted}
  234. %>>>
  235. \end{overprint}
  236. \end{columns}
  237. \end{frame}
  238. %>>>
  239. \begin{frame}[t,fragile] \frametitle{L1-cache bandwidth (vectorized \& aligned)}{} %<<<
  240. \begin{columns}
  241. \column{0.55\textwidth}
  242. \begin{overprint}
  243. \onslide<1>%<<<
  244. \vspace{0.5em}
  245. Unaligned read:\\
  246. \resizebox{0.8\textwidth}{!}{\begin{tikzpicture} %<<<
  247. \fill[c3] (0.75,1) rectangle (2.75,1.25);
  248. \draw[step=0.25,thick, darkgray] (0.749,0.99) grid (2.75,1.25);
  249. \node at (3.3,1.125) {\footnotesize register};
  250. \fill[c2] (0,0) rectangle (2,-0.25);
  251. \draw[step=0.25,thick, darkgray] (0,0) grid (2,-0.25);
  252. \fill[c2] (2.25,0) rectangle (4.25,-0.25);
  253. \draw[step=0.25,thick, darkgray] (2.249,0) grid (4.25,-0.25);
  254. \node at (2.1,-0.4) {\footnotesize L1 cache};
  255. \draw[-latex, thick] (0.875,0.1) -- (0.875,0.9);
  256. \draw[-latex, thick] (1.125,0.1) -- (1.125,0.9);
  257. \draw[-latex, thick] (1.375,0.1) -- (1.375,0.9);
  258. \draw[-latex, thick] (1.625,0.1) -- (1.625,0.9);
  259. \draw[-latex, thick] (1.875,0.1) -- (1.875,0.9);
  260. \draw[-latex, thick] (2.375,0.1) -- (2.125,0.9);
  261. \draw[-latex, thick] (2.625,0.1) -- (2.375,0.9);
  262. \draw[-latex, thick] (2.875,0.1) -- (2.625,0.9);
  263. \end{tikzpicture}}%>>>
  264. %>>>
  265. \onslide<2->%<<<
  266. \vspace{0.5em}
  267. Aligned read:\\
  268. \resizebox{0.8\textwidth}{!}{\begin{tikzpicture} %<<<
  269. \fill[c3] (0,1) rectangle (2,1.25);
  270. \draw[step=0.25,thick, darkgray] (0,0.99) grid (2,1.25);
  271. \node at (2.55,1.125) {\footnotesize register};
  272. \fill[c2] (0,0) rectangle (2,-0.25);
  273. \draw[step=0.25,thick, darkgray] (0,0) grid (2,-0.25);
  274. \fill[c2] (2.25,0) rectangle (4.25,-0.25);
  275. \draw[step=0.25,thick, darkgray] (2.249,0) grid (4.25,-0.25);
  276. \node at (2.1,-0.4) {\footnotesize L1 cache};
  277. \draw[-latex, thick] (0.125,0.1) -- (0.125,0.9);
  278. \draw[-latex, thick] (0.375,0.1) -- (0.375,0.9);
  279. \draw[-latex, thick] (0.625,0.1) -- (0.625,0.9);
  280. \draw[-latex, thick] (0.875,0.1) -- (0.875,0.9);
  281. \draw[-latex, thick] (1.125,0.1) -- (1.125,0.9);
  282. \draw[-latex, thick] (1.375,0.1) -- (1.375,0.9);
  283. \draw[-latex, thick] (1.625,0.1) -- (1.625,0.9);
  284. \draw[-latex, thick] (1.875,0.1) -- (1.875,0.9);
  285. \end{tikzpicture}}%>>>
  286. \vspace{0.2em}
  287. \small
  288. Replace:
  289. \begin{itemize}
  290. \item malloc $\rightarrow$ sctl::aligned\_new
  291. \item Vec::Load $\rightarrow$ Vec::AlignedLoad
  292. \item Vec::Store $\rightarrow$ Vec::AlignedStore
  293. \end{itemize}
  294. %>>>
  295. \end{overprint}
  296. \column{0.05\textwidth}
  297. \column{0.4\textwidth}
  298. \begin{overprint}
  299. \onslide<3->%<<<
  300. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  301. Writing to array
  302. Bandwidth = 210.273 GB/s
  303. cycles/iter = 1.00441
  304. Reading from array
  305. Bandwidth = 380.953 GB/s
  306. cycles/iter = 0.554399
  307. Adding arrays
  308. Bandwidth = 325.592 GB/s
  309. cycles/iter = 1.94599
  310. \end{minted}
  311. %>>>
  312. \end{overprint}
  313. \end{columns}
  314. \vspace{1em}
  315. \begin{columns}
  316. \column{0.65\textwidth}
  317. \only<3>{\textcolor{red}{Aligned memory accesses to L1 can be $2\times$ faster!}}
  318. \end{columns}
  319. \end{frame}
  320. %>>>
  321. \begin{frame}[t,fragile] \frametitle{Main memory bandwidth}{} %<<<
  322. \begin{columns}
  323. \column{0.6\textwidth}
  324. \footnotesize
  325. \begin{overprint}
  326. \onslide<1->%<<<
  327. \begin{minted}[
  328. frame=lines,
  329. fontsize=\footnotesize,
  330. linenos,
  331. autogobble,
  332. mathescape
  333. ]{C++}
  334. long N = 1e9; // 8 GB
  335. // Initialize X, Y
  336. for (long i = 0; i < N; i++) X[i] = Y[i] = i;
  337. // Write to array
  338. #pragma omp parallel for schedule(static)
  339. for (long i = 0; i < N; i++) X[i] = 3.14;
  340. // Read from array
  341. double sum = 0;
  342. #pragma omp parallel for schedule(static) reduction(+:sum)
  343. for (long i = 0; i < N; i++) sum += X[i];
  344. // Adding arrays: 2-reads, 1-write
  345. #pragma omp parallel for schedule(static)
  346. for (long i = 0; i < N; i++) Y[i] += X[i];
  347. \end{minted}
  348. %>>>
  349. \end{overprint}
  350. \column{0.05\textwidth}
  351. \column{0.35\textwidth}
  352. \vspace{0.5em}
  353. \begin{overprint}
  354. \onslide<2->%<<<
  355. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  356. Writing to array
  357. Bandwidth = 35.4136 GB/s
  358. Reading from array
  359. Bandwidth = 69.4623 GB/s
  360. Adding arrays
  361. Bandwidth = 113.637 GB/s
  362. \end{minted}
  363. %\textcolor{red}{\qquad only $1.5\times$ speedup :(}
  364. %>>>
  365. \end{overprint}
  366. \end{columns}
  367. \end{frame}
  368. %>>>
  369. \begin{frame} \frametitle{Non-uniform Memory Access}{} %<<<
  370. \begin{itemize}
  371. %\item {\bf Cores:} individual processing units.
  372. %\item {\bf Sockets:} collection of cores on the same silicon die.
  373. \item Each socket is connected to its own DRAM.
  374. \item Sockets interconnected using a network: QPI (Intel), HT (AMD).
  375. \item Location of memory pages determined by first-touch policy.
  376. \end{itemize}
  377. \center
  378. \includegraphics[width=0.7\textwidth]{figs/numa1}
  379. {\scriptsize Source: \url{https://frankdenneman.nl/2016/07/07/numa-deep-dive-part-1-uma-numa}}
  380. \end{frame}
  381. %>>>
  382. \begin{frame}[t,fragile] \frametitle{Main memory bandwidth (NUMA aware)}{} %<<<
  383. \begin{columns}
  384. \column{0.6\textwidth}
  385. \footnotesize
  386. \begin{overprint}
  387. \onslide<1-2>%<<<
  388. \begin{minted}[
  389. frame=lines,
  390. fontsize=\footnotesize,
  391. linenos,
  392. autogobble,
  393. mathescape
  394. ]{C++}
  395. long N = 1e9; // 8 GB
  396. // Initialize X, Y
  397. #pragma omp parallel for schedule(static)
  398. for (long i = 0; i < N; i++) X[i] = Y[i] = i;
  399. // Write to array
  400. #pragma omp parallel for schedule(static)
  401. for (long i = 0; i < N; i++) X[i] = 3.14;
  402. // Read from array
  403. double sum = 0;
  404. #pragma omp parallel for schedule(static) reduction(+:sum)
  405. for (long i = 0; i < N; i++) sum += X[i];
  406. // Adding arrays: 2-reads, 1-write
  407. #pragma omp parallel for schedule(static)
  408. for (long i = 0; i < N; i++) Y[i] += X[i];
  409. \end{minted}
  410. %>>>
  411. \onslide<3>%<<<
  412. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  413. \end{minted}
  414. \center
  415. \vspace{8em}
  416. \textcolor{red}{\normalsize Many shared-memory codes scale poorly \\
  417. because they don't account for NUMA!}
  418. %>>>
  419. \end{overprint}
  420. \column{0.05\textwidth}
  421. \column{0.35\textwidth}
  422. \begin{overprint}
  423. \onslide<1>%<<<
  424. Set thread affinity:
  425. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  426. export OMP_PLACES=cores
  427. export OMP_PROC_BIND=spread
  428. \end{minted}
  429. %>>>
  430. \onslide<2->%<<<
  431. \vspace{-1.5em}
  432. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  433. \end{minted}
  434. {\footnotesize \underline{Original:}}
  435. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  436. Writing to array
  437. Bandwidth = 35.4136 GB/s
  438. \end{minted}
  439. \vspace{0.1ex}
  440. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  441. Reading from array
  442. Bandwidth = 69.4623 GB/s
  443. \end{minted}
  444. \vspace{0.1ex}
  445. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  446. Adding arrays
  447. Bandwidth = 113.637 GB/s
  448. \end{minted}
  449. \vspace{0.2em}
  450. {\footnotesize \underline{NUMA aware:}}
  451. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  452. Writing to array
  453. Bandwidth = 87.1515 GB/s
  454. \end{minted}
  455. \vspace{0.1ex}
  456. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  457. Reading from array
  458. Bandwidth = 160.663 GB/s
  459. \end{minted}
  460. \vspace{0.1ex}
  461. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  462. Adding arrays
  463. Bandwidth = 180.069 GB/s
  464. \end{minted}
  465. %>>>
  466. \end{overprint}
  467. \end{columns}
  468. \end{frame}
  469. %>>>
  470. \begin{frame} \frametitle{Memory bandwidth and latency}{} %<<<
  471. \begin{columns}
  472. \column{0.5\textwidth}
  473. \center
  474. {$32\times$ difference between \\
  475. L1 and main memory bandwidth!}
  476. \vspace{1em}
  477. \resizebox{1.0\textwidth}{!}{\begin{tikzpicture} %<<<
  478. \begin{loglogaxis}[width=12cm,height=8cm, xmin=8192, xmax=256000000, ymin=80, ymax=6000,
  479. xlabel={array size per core (bytes)}, ylabel=Bandwidth (GB/s), legend pos=south west, legend style={draw=none}]
  480. \addplot[mark=none, thick, color=blue] table [x={size}, y={read-bw}] {data/bw.txt};
  481. \addplot[mark=none, thick, color=red] table [x={size}, y={write-bw}] {data/bw.txt};
  482. \addplot[mark=none, thick, color=black] table [x={size}, y={vecadd-bw}] {data/bw.txt};
  483. \addplot[mark=none, color=gray, thick] coordinates { (32768,8) (32768,80000)};
  484. \addplot[mark=none, color=gray, thick] coordinates { (1048576,8) (1048576,80000)};
  485. \addplot[mark=none, color=gray, thick] coordinates { (3244032,8) (3244032,80000)};
  486. \legend{{read-bw},{write-bw},{read+write-bw}}
  487. \end{loglogaxis}
  488. \end{tikzpicture}} %>>>
  489. \column{0.5\textwidth}
  490. \center
  491. {$56\times$ difference between \\
  492. L1 and main memory latency!}
  493. \vspace{1em}
  494. \resizebox{1.0\textwidth}{!}{\begin{tikzpicture} %<<<
  495. \begin{loglogaxis}[width=12cm,height=8cm, xmin=8192, xmax=256000000, ymin=4, ymax=300,
  496. xlabel={array size (bytes)}, ylabel=cycles, legend pos=north west, legend style={draw=none}]
  497. \addplot[mark=none, thick, color=black] table [x={bytes}, y={cycles}] {data/latency.txt};
  498. \addplot[mark=none, color=gray, thick] coordinates { (32768,1) (32768,5000)};
  499. \addplot[mark=none, color=gray, thick] coordinates { (1048576,1) (1048576,5000)};
  500. \addplot[mark=none, color=gray, thick] coordinates {(25952256,1) (25952256,5000)};
  501. \legend{{latency}}
  502. \end{loglogaxis}
  503. \end{tikzpicture}} %>>>
  504. \end{columns}
  505. \end{frame}
  506. %>>>
  507. \begin{frame}[fragile] \frametitle{Optimizing GEMM for memory access}{} %<<<
  508. \begin{columns}
  509. \column{0.5\textwidth}
  510. \begin{overprint}
  511. \onslide<1->%<<<
  512. \resizebox{0.99\textwidth}{!}{\begin{tikzpicture} %<<<
  513. \node at (-0.5,-1) {$M$};
  514. \node at (1,0.5) {$N$};
  515. \draw[latex-latex, thick] (0,0.25) -- (2,0.25);
  516. \draw[latex-latex, thick] (-0.25,0) -- (-0.25,-2);
  517. \fill[c2] (0,0) rectangle (2,-2);
  518. \draw[step=0.25,thick, darkgray] (0,0) grid (2,-2);
  519. \node at (1,-1) {\Large C};
  520. \node at (2.5,-1) {$=$};
  521. \node at (4.25,0.5) {$K$};
  522. \draw[latex-latex, thick] (3,0.25) -- (5.5,0.25);
  523. \fill[c3] (3,0) rectangle (5.5,-2);
  524. \draw[step=0.25,thick, darkgray] (2.99,0) grid (5.5,-2);
  525. \node at (4.25,-1) {\Large A};
  526. \node at (6,-1) {$\times$};
  527. \fill[c4] (6.5,0) rectangle (8.5,-2.5);
  528. \draw[step=0.25,thick, darkgray] (6.49,0) grid (8.5,-2.5);
  529. \node at (7.5,-1.25) {\Large B};
  530. \end{tikzpicture}}%>>>
  531. \begin{minted}[
  532. frame=lines,
  533. fontsize=\scriptsize,
  534. baselinestretch=1,
  535. numbersep=5pt,
  536. linenos,
  537. autogobble,
  538. framesep=1mm,
  539. mathescape
  540. ]{C++}
  541. void GEMM(int M, int N, int K, double* A, int LDA,
  542. double* B, int LDB, double* C, int LDC) {
  543. for (int j = 0; j < N; j++)
  544. for (int k = 0; k < K; k++)
  545. for (int i = 0; i < M; i++)
  546. C[i+j*LDC] += A[i+k*LDA] * B[k+j*LDB];
  547. }
  548. \end{minted}
  549. %>>>
  550. \qquad \qquad {\small M = N = K = 2000}
  551. \end{overprint}
  552. \column{0.05\textwidth}
  553. \column{0.5\textwidth}
  554. \begin{overprint}
  555. \onslide<1>%<<<
  556. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  557. \end{minted}
  558. {\bf perf:} performance monitoring tool which samples hardware counters
  559. %>>>
  560. \onslide<2->%<<<
  561. \begin{minted}[autogobble,fontsize=\footnotesize]{text}
  562. \end{minted}
  563. {\bf perf:} performance monitoring tool which samples hardware counters
  564. \vspace{1em}
  565. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  566. ~> g++ -O3 -march=native gemm.cpp
  567. ~> perf stat -e L1-dcache-load-misses \
  568. -e L1-dcache-loads -e l2_rqsts.miss \
  569. -e l2_rqsts.references -e LLC-load-misses \
  570. -e LLC-loads ./a.out
  571. FLOP rate = 4.87547 GFLOP/s
  572. 30,311,624,911 L1-dcache-loads
  573. 14,900,283,807 L1-dcache-load-misses 49.16% of all L1-dcache accesses
  574. 24,387,281,512 l2_rqsts.references
  575. 10,034,752,513 l2_rqsts.miss
  576. 2,260,778,457 LLC-loads
  577. 1,310,606,484 LLC-load-misses 57.97% of all LL-cache accesses
  578. \end{minted}
  579. %>>>
  580. \end{overprint}
  581. \end{columns}
  582. \end{frame}
  583. %>>>
  584. \begin{frame}[fragile] \frametitle{GEMM blocking}{} %<<<
  585. \begin{columns}
  586. \column{0.5\textwidth}
  587. \begin{overprint}
  588. \onslide<1>%<<<
  589. \begin{minted}[
  590. frame=lines,
  591. fontsize=\scriptsize,
  592. baselinestretch=1,
  593. numbersep=5pt,
  594. linenos,
  595. autogobble,
  596. framesep=1mm,
  597. mathescape
  598. ]{C++}
  599. template <int M, int N, int K>
  600. void GEMM_blocked(double* A, int LDA,
  601. double* B, int LDB, double* C, int LDC) {
  602. for (int j = 0; j < N; j++)
  603. for (int k = 0; k < K; k++)
  604. for (int i = 0; i < M; i++)
  605. C[i+j*LDC] += A[i+k*LDA] * B[k+j*LDB];
  606. }
  607. template <int M, int N, int K,
  608. int Mb, int Nb, int Kb, int... NN>
  609. void GEMM_blocked(double* A, int LDA,
  610. double* B, int LDB, double* C, int LDC) {
  611. for (int j = 0; j < N; j+=Nb)
  612. for (int i = 0; i < M; i+=Mb)
  613. for (int k = 0; k < K; k+=Kb)
  614. GEMM_blocked<Mb,Nb,Kb, NN...>(A+i+k*LDA,LDA,
  615. B+k+j*LDB,LDB, C+i+j*LDC,LDC);
  616. }
  617. \end{minted}
  618. %>>>
  619. \onslide<2->%<<<
  620. \begin{minted}[
  621. frame=lines,
  622. fontsize=\scriptsize,
  623. baselinestretch=1,
  624. numbersep=5pt,
  625. linenos,
  626. autogobble,
  627. framesep=1mm,
  628. mathescape
  629. ]{C++}
  630. template <int M, int N, int K>
  631. void GEMM_blocked(double* A, int LDA,
  632. double* B, int LDB, double* C, int LDC) {
  633. GEMM_ker_vec_unrolled<M,N,K>(A,LDA, B,LDB, C,LDC);
  634. }
  635. template <int M, int N, int K,
  636. int Mb, int Nb, int Kb, int... NN>
  637. void GEMM_blocked(double* A, int LDA,
  638. double* B, int LDB, double* C, int LDC) {
  639. for (int j = 0; j < N; j+=Nb)
  640. for (int i = 0; i < M; i+=Mb)
  641. for (int k = 0; k < K; k+=Kb)
  642. GEMM_blocked<Mb,Nb,Kb, NN...>(A+i+k*LDA,LDA,
  643. B+k+j*LDB,LDB, C+i+j*LDC,LDC);
  644. }
  645. \end{minted}
  646. %>>>
  647. \end{overprint}
  648. \column{0.05\textwidth}
  649. \column{0.55\textwidth}
  650. \begin{overprint}
  651. \onslide<1-2>%<<<
  652. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  653. \end{minted}
  654. \includegraphics[width=0.99\textwidth]{figs/gemm-tiling}
  655. %>>>
  656. \onslide<3>%<<<
  657. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  658. \end{minted}
  659. {\small GEMM\_blocked<M,N,K, 8,10,40>(...)}
  660. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  661. FLOP rate = 11.803 GFLOP/s
  662. 11,514,598,988 L1-dcache-loads
  663. 3,274,256,252 L1-dcache-load-misses 28.44% of all L1-dcache accesses
  664. 3,283,717,404 l2_rqsts.references
  665. 1,047,408,896 l2_rqsts.miss
  666. 1,032,604,200 LLC-loads
  667. 293,256,535 LLC-load-misses 28.40% of all LL-cache accesses
  668. \end{minted}
  669. %>>>
  670. \onslide<4>%<<<
  671. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  672. \end{minted}
  673. {\small GEMM\_blocked<M,N,K, 8,10,40>(...)}
  674. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  675. FLOP rate = 11.803 GFLOP/s
  676. 11,514,598,988 L1-dcache-loads
  677. 3,274,256,252 L1-dcache-load-misses 28.44% of all L1-dcache accesses
  678. 3,283,717,404 l2_rqsts.references
  679. 1,047,408,896 l2_rqsts.miss
  680. 1,032,604,200 LLC-loads
  681. 293,256,535 LLC-load-misses 28.40% of all LL-cache accesses
  682. \end{minted}
  683. \vspace{0.5em}
  684. {\small GEMM\_blocked<M,N,K, 40,40,40, 8,10,40>(...)}
  685. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  686. FLOP rate = 26.5831 GFLOP/s
  687. 11,533,695,903 L1-dcache-loads
  688. 1,084,624,171 L1-dcache-load-misses 9.40% of all L1-dcache accesses
  689. 1,091,155,596 l2_rqsts.references
  690. 538,256,077 l2_rqsts.miss
  691. 470,615,736 LLC-loads
  692. 112,816,293 LLC-load-misses 23.97% of all LL-cache accesses
  693. \end{minted}
  694. %>>>
  695. \onslide<5>%<<<
  696. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  697. \end{minted}
  698. {\small GEMM\_blocked<M,N,K, 40,40,40, 8,10,40>(...)}
  699. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  700. FLOP rate = 26.5831 GFLOP/s
  701. 11,533,695,903 L1-dcache-loads
  702. 1,084,624,171 L1-dcache-load-misses 9.40% of all L1-dcache accesses
  703. 1,091,155,596 l2_rqsts.references
  704. 538,256,077 l2_rqsts.miss
  705. 470,615,736 LLC-loads
  706. 112,816,293 LLC-load-misses 23.97% of all LL-cache accesses
  707. \end{minted}
  708. %>>>
  709. \onslide<6>%<<<
  710. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  711. \end{minted}
  712. {\small GEMM\_blocked<M,N,K, 40,40,40, 8,10,40>(...)}
  713. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  714. FLOP rate = 26.5831 GFLOP/s
  715. 11,533,695,903 L1-dcache-loads
  716. 1,084,624,171 L1-dcache-load-misses 9.40% of all L1-dcache accesses
  717. 1,091,155,596 l2_rqsts.references
  718. 538,256,077 l2_rqsts.miss
  719. 470,615,736 LLC-loads
  720. 112,816,293 LLC-load-misses 23.97% of all LL-cache accesses
  721. \end{minted}
  722. {\small GEMM\_blocked<M,N,K, 200,200,200, \\
  723. \phantom{000000000000000000} 40,40,40, 8,10,40>(...)}
  724. \begin{minted}[autogobble,fontsize=\scriptsize]{text}
  725. FLOP rate = 43.1604 GFLOP/s
  726. 11,531,903,350 L1-dcache-loads
  727. 1,094,841,388 L1-dcache-load-misses 9.49% of all L1-dcache accesses
  728. 1,194,502,755 l2_rqsts.references
  729. 201,888,454 l2_rqsts.miss
  730. 116,940,584 LLC-loads
  731. 44,894,302 LLC-load-misses 38.39% of all LL-cache accesses
  732. \end{minted}
  733. %>>>
  734. \end{overprint}
  735. \end{columns}
  736. \end{frame}
  737. %>>>
  738. \begin{frame} \frametitle{Memory and caches summary}{} %<<<
  739. \begin{itemize}
  740. \item test
  741. \end{itemize}
  742. % many ways to shoot yourself in the foot:
  743. % thread contention
  744. % cache coherency
  745. % thread pinning
  746. % NUMA
  747. % locks / atomic / synchronization
  748. \end{frame}
  749. %>>>
  750. % Stack vs heap memory
  751. % vector vs linked list
  752. %\begin{frame} \frametitle{Shared memory pitfalls}{} %<<<
  753. %
  754. % % many ways to shoot yourself in the foot:
  755. %
  756. % % thread contention
  757. % % cache coherency
  758. % % thread pinning
  759. % % NUMA
  760. % % locks / atomic / synchronization
  761. %
  762. %\end{frame}
  763. %%>>>