summaryrefslogtreecommitdiff
path: root/cesar/ecos/packages/kernel/current/doc/kernel.sgml
blob: 694ecafee11d262e7652d519521b9c2d3950289b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
<!-- {{{ Banner                         -->

<!-- =============================================================== -->
<!--                                                                 -->
<!--     kernel.sgml                                                 -->
<!--                                                                 -->
<!--     eCos kernel documentation.                                  -->
<!--                                                                 -->
<!-- =============================================================== -->
<!-- ####COPYRIGHTBEGIN####                                          -->
<!--                                                                 -->
<!-- =============================================================== -->
<!-- Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.  -->
<!-- This material may be distributed only subject to the terms      -->
<!-- and conditions set forth in the Open Publication License, v1.0  -->
<!-- or later (the latest version is presently available at          -->
<!-- http://www.opencontent.org/openpub/)                            -->
<!-- Distribution of the work or derivative of the work in any       -->
<!-- standard (paper) book form is prohibited unless prior           -->
<!-- permission obtained from the copyright holder                   -->
<!-- =============================================================== -->
<!--                                                                 -->      
<!-- ####COPYRIGHTEND####                                            -->
<!-- =============================================================== -->
<!-- #####DESCRIPTIONBEGIN####                                       -->
<!--                                                                 -->
<!-- Author(s):    nickg, bartv, markg                               -->
<!-- Contributors: eCos team                                         -->
<!-- Date:        2002/02/13                                         -->
<!-- Version:     0.02                                               -->
<!--                                                                 -->
<!-- ####DESCRIPTIONEND####                                          -->
<!-- =============================================================== -->

<!-- }}} -->

<part id="kernel">
  <title>The eCos Kernel</title>

<!-- {{{ Overview                       -->

  <refentry id="kernel-overview">

    <refmeta>
    <refentrytitle>Kernel Overview</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>Kernel</refname>
      <refpurpose>Overview of the eCos Kernel</refpurpose>
    </refnamediv>

<!-- {{{ Description                    -->

    <refsect1 id="kernel-overview-description">
      <title>Description</title>
      <para>
The kernel is one of the key packages in all of eCos. It provides the
core functionality needed for developing multi-threaded applications:
      </para>
      <orderedlist>
        <listitem><para>
The ability to create new threads in the system, either during startup
or when the system is already running.
        </para></listitem>
        <listitem><para>
Control over the various threads in the system, for example
manipulating their priorities.
        </para></listitem>
        <listitem><para>
A choice of schedulers, determining which thread should currently be
running. 
        </para></listitem>
        <listitem><para>
A range of synchronization primitives, allowing threads to interact
and share data safely.
        </para></listitem>
        <listitem><para>
Integration with the system's support for interrupts and exceptions.
        </para></listitem>
      </orderedlist>
      <para>
In some other operating systems the kernel provides additional
functionality. For example the kernel may also provide memory
allocation functionality, and device drivers may be part of the kernel
as well. This is not the case for eCos. Memory allocation is handled
by a separate package. Similarly, each device driver will typically be a
separate package. Various packages are combined and configured using
the eCos configuration technology to meet the requirements of the
application.
      </para>
      <para>
The eCos kernel package is optional. It is possible to write
single-threaded applications which do not use any kernel
functionality, for example RedBoot. Typically such applications are
based around a central polling loop, continually checking all devices
and taking appropriate action when I/O occurs. A small amount of
calculation is possible every iteration, at the cost of an increased
delay between an I/O event occurring and the polling loop detecting
the event. When the requirements are straightforward it may well be
easier to develop the application using a polling loop, avoiding the
complexities of multiple threads and synchronization between threads.
As requirements get more complicated a multi-threaded solution becomes
more appropriate, requiring the use of the kernel. In fact some of the
more advanced packages in eCos, for example the TCP/IP stack, use
multi-threading internally. Therefore if the application uses any of
those packages then the kernel becomes a required package, not an
optional one.
      </para>
      <para>
The kernel functionality can be used in one of two ways. The kernel
provides its own C API, with functions like
<function>cyg_thread_create</function> and
<function>cyg_mutex_lock</function>. These can be called directly from
application code or from other packages. Alternatively there are a
number of packages which provide compatibility with existing APIs,
for example POSIX threads or &micro;ITRON. These allow application
code to call standard functions such as
<function>pthread_create</function>, and those functions are
implemented using the basic functionality provided by the eCos kernel.
Using compatibility packages in an eCos application can make it much
easier to reuse code developed in other environments, and to share
code.
      </para>
      <para>
Although the different compatibility packages have similar
requirements on the underlying kernel, for example the ability to
create a new thread, there are differences in the exact semantics. For
example, strict &micro;ITRON compliance requires that kernel
timeslicing is disabled. This is achieved largely through the
configuration technology. The kernel provides a number of
configuration options that control the exact semantics that are
provided, and the various compatibility packages require particular
settings for those options. This has two important consequences.
First, it is not usually possible to have two different compatibility
packages in one eCos configuration because they will have conflicting
requirements on the underlying kernel. Second, the semantics of the
kernel's own API are only loosely defined because of the many
configuration options. For example <function>cyg_mutex_lock</function>
will always attempt to lock a mutex, but various configuration options
determine the behaviour when the mutex is already locked and there is
a possibility of priority inversion.
      </para>
      <para>
The optional nature of the kernel package presents some complications
for other code, especially device drivers. Wherever possible a device
driver should work whether or not the kernel is present. However there
are some parts of the system, especially those related to interrupt
handling, which should be implemented differently in multi-threaded
environments containing the eCos kernel and in single-threaded
environments without the kernel. To cope with both scenarios the
common HAL package provides a driver API, with functions such as
<function>cyg_drv_interrupt_attach</function>. When the kernel package
is present these driver API functions map directly on to the
equivalent kernel functions such as
<function>cyg_interrupt_attach</function>, using macros to avoid any
overheads. When the kernel is absent the common HAL package implements
the driver API directly, but this implementation is simpler than the
one in the kernel because it can assume a single-threaded environment. 
      </para>
    </refsect1>

<!-- }}} -->
<!-- {{{ Schedulers                     -->

    <refsect1 id="kernel-overview-schedulers">
      <title>Schedulers</title>
      <para>
When a system involves multiple threads, a scheduler is needed to
determine which thread should currently be running. The eCos kernel
can be configured with one of two schedulers, the bitmap scheduler and
the multi-level queue (MLQ) scheduler. The bitmap scheduler is
somewhat more efficient, but has a number of limitations. Most systems
will instead use the MLQ scheduler. Other schedulers may be added in
the future, either as extensions to the kernel package or in separate
packages.
      </para>
      <para>
Both the bitmap and the MLQ scheduler use a simple numerical priority
to determine which thread should be running. The number of priority
levels is configurable via the option
<varname>CYGNUM_KERNEL_SCHED_PRIORITIES</varname>, but a typical
system will have up to 32 priority levels. Therefore thread priorities
will be in the range 0 to 31, with 0 being the highest priority and 31
the lowest. Usually only the system's idle thread will run at the
lowest priority. Thread priorities are absolute, so the kernel will
only run a lower-priority thread if all higher-priority threads are
currently blocked.
      </para>
      <para>
The bitmap scheduler only allows one thread per priority level, so if
the system is configured with 32 priority levels then it is limited to
only 32 threads &mdash; still enough for many applications. A simple
bitmap can be used to keep track of which threads are currently
runnable. Bitmaps can also be used to keep track of threads waiting on
a mutex or other synchronization primitive. Identifying the
highest-priority runnable or waiting thread involves a simple
operation on the bitmap, and an array index operation can then be used
to get hold of the thread data structure itself. This makes the
bitmap scheduler fast and totally deterministic.
      </para>
      <para>
The MLQ scheduler allows multiple threads to run at the same priority.
This means that there is no limit on the number of threads in the
system, other than the amount of memory available. However operations
such as finding the highest priority runnable thread are a little bit
more expensive than for the bitmap scheduler.
      </para>
      <para>
Optionally the MLQ scheduler supports timeslicing, where the scheduler
automatically switches from one runnable thread to another when some
number of clock ticks have occurred. Timeslicing only comes into play
when there are two or more runnable threads at the same priority and
no higher priority runnable threads. If timeslicing is disabled then a thread
will not be preempted by another thread of the same priority, and will
continue running until either it explicitly yields the processor or
until it blocks by, for example, waiting on a synchronization
primitive. The configuration options
<varname>CYGSEM_KERNEL_SCHED_TIMESLICE</varname> and
<varname>CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS</varname> control
timeslicing. The bitmap scheduler does not provide timeslicing
support. It only allows one thread per priority level, so it is not
possible to preempt the current thread in favour of another one with
the same priority.
      </para>
      <para>
Another important configuration option that affects the MLQ scheduler
is <varname>CYGIMP_KERNEL_SCHED_SORTED_QUEUES</varname>. This
determines what happens when a thread blocks, for example by waiting
on a semaphore which has no pending events. The default behaviour of
the system is last-in-first-out queuing. For example if several
threads are waiting on a semaphore and an event is posted, the thread
that gets woken up is the last one that called
<function>cyg_semaphore_wait</function>. This allows for a simple and
fast implementation of both the queue and dequeue operations. However
if there are several queued threads with different priorities, it may
not be the highest priority one that gets woken up. In practice this
is rarely a problem: usually there will be at most one thread waiting
on a queue, or when there are several threads they will be of the same
priority. However if the application does require strict priority
queueing then the option
<varname>CYGIMP_KERNEL_SCHED_SORTED_QUEUES</varname> should be
enabled. There are disadvantages: more work is needed whenever a
thread is queued, and the scheduler needs to be locked for this
operation so the system's dispatch latency is worse. If the bitmap
scheduler is used then priority queueing is automatic and does not
involve any penalties.
      </para>
      <para>
Some kernel functionality is currently only supported with the MLQ
scheduler, not the bitmap scheduler. This includes support for SMP
systems, and protection against priority inversion using either mutex
priority ceilings or priority inheritance.
      </para>
    </refsect1>

<!-- }}} -->
<!-- {{{ Synch primitives               -->

    <refsect1 id="kernel-overview-synch-primitives">
      <title>Synchronization Primitives</title>
      <para>
The eCos kernel provides a number of different synchronization
primitives: <link linkend="kernel-mutexes">mutexes</link>,
<link linkend="kernel-condition-variables">condition variables</link>,
<link linkend="kernel-semaphores">counting semaphores</link>,
<link linkend="kernel-mail-boxes">mail boxes</link> and
<link linkend="kernel-flags">event flags</link>.
      </para>
      <para>
Mutexes serve a very different purpose from the other primitives. A
mutex allows multiple threads to share a resource safely: a thread
locks a mutex, manipulates the shared resource, and then unlocks the
mutex again. The other primitives are used to communicate information
between threads, or alternatively from a DSR associated with an
interrupt handler to a thread.
      </para>
      <para>
When a thread that has locked a mutex needs to wait for some condition
to become true, it should use a condition variable. A condition
variable is essentially just a place for a thread to wait, and which
another thread, or DSR, can use to wake it up. When a thread waits on
a condition variable it releases the mutex before waiting, and when it
wakes up it reacquires it before proceeding. These operations are
atomic so that synchronization race conditions cannot be introduced.
      </para>
      <para>
A counting semaphore is used to indicate that a particular event has
occurred. A consumer thread can wait for this event to occur, and a
producer thread or a DSR can post the event. There is a count
associated with the semaphore so if the event occurs multiple times in
quick succession this information is not lost, and the appropriate
number of semaphore wait operations will succeed.
      </para>
      <para>
Mail boxes are also used to indicate that a particular event has
occurred, and allow for one item of data to be exchanged per event.
Typically this item of data would be a pointer to some data structure.
Because of the need to store this extra data, mail boxes have a
finite capacity. If a producer thread generates mail box events
faster than they can be consumed then, to avoid overflow, it will be
blocked until space is again available in the mail box. This means
that mail boxes usually cannot be used by a DSR to wake up a
thread. Instead mail boxes are typically only used between threads.
      </para>
      <para>
Event flags can be used to wait on some number of different events,
and to signal that one or several of these events have occurred. This
is achieved by associating bits in a bit mask with the different
events. Unlike a counting semaphore no attempt is made to keep track
of the number of events that have occurred, only the fact that an
event has occurred at least once. Unlike a mail box it is not
possible to send additional data with the event, but this does mean
that there is no possibility of an overflow and hence event flags can
be used between a DSR and a thread as well as between threads.
      </para>
      <para>
The eCos common HAL package provides its own device driver API which
contains some of the above synchronization primitives. These allow
the DSR for an interrupt handler to signal events to higher-level
code. If the configuration includes the eCos kernel package then
the driver API routines map directly on to the equivalent kernel
routines, allowing interrupt handlers to interact with threads. If the
kernel package is not included and the application consists of just a
single thread running in polled mode then the driver API is
implemented entirely within the common HAL, and with no need to worry
about multiple threads the implementation can obviously be rather
simpler. 
      </para>
    </refsect1>

<!-- }}} -->
<!-- {{{ Threads and interrupts         -->

    <refsect1 id="kernel-overview-threads-interrupts">
      <title>Threads and Interrupt Handling</title>
      <para>
During normal operation the processor will be running one of the
threads in the system. This may be an application thread, a system
thread running inside say the TCP/IP stack, or the idle thread. From
time to time a hardware interrupt will occur, causing control to be
transferred briefly to an interrupt handler. When the interrupt has
been completed the system's scheduler will decide whether to return
control to the interrupted thread or to some other runnable thread.
      </para>
      <para>
Threads and interrupt handlers must be able to interact. If a thread
is waiting for some I/O operation to complete, the interrupt handler
associated with that I/O must be able to inform the thread that the
operation has completed. This can be achieved in a number of ways. One
very simple approach is for the interrupt handler to set a volatile
variable. A thread can then poll continuously until this flag is set,
possibly sleeping for a clock tick in between. Polling continuously
means that the cpu time is not available for other activities, which
may be acceptable for some but not all applications. Polling once
every clock tick imposes much less overhead, but means that the thread
may not detect that the I/O event has occurred until an entire clock
tick has elapsed. In typical systems this could be as long as 10
milliseconds. Such a delay might be acceptable for some applications,
but not all.
      </para>
      <para>
A better solution would be to use one of the synchronization
primitives. The interrupt handler could signal a condition variable,
post to a semaphore, or use one of the other primitives. The thread
would perform a wait operation on the same primitive. It would not
consume any cpu cycles until the I/O event had occurred, and when the
event does occur the thread can start running again immediately
(subject to any higher priority threads that might also be runnable).
      </para>
      <para>
Synchronization primitives constitute shared data, so care must be
taken to avoid problems with concurrent access. If the thread that was
interrupted was just performing some calculations then the interrupt
handler could manipulate the synchronization primitive quite safely.
However if the interrupted thread happened to be inside some kernel
call then there is a real possibility that some kernel data structure
will be corrupted. 
      </para>
      <para>
One way of avoiding such problems would be for the kernel functions to
disable interrupts when executing any critical region. On most
architectures this would be simple to implement and very fast, but it
would mean that interrupts would be disabled often and for quite a
long time. For some applications that might not matter, but many
embedded applications require that the interrupt handler run as soon
as possible after the hardware interrupt has occurred. If the kernel
relied on disabling interrupts then it would not be able to support
such applications.
      </para>
      <para>
Instead the kernel uses a two-level approach to interrupt handling.
Associated with every interrupt vector is an Interrupt Service Routine
or ISR, which will run as quickly as possible so that it can service
the hardware. However an ISR can make only a small number of kernel
calls, mostly related to the interrupt subsystem, and it cannot make
any call that would cause a thread to wake up. If an ISR detects that
an I/O operation has completed and hence that a thread should be woken
up, it can cause the associated Deferred Service Routine or DSR to
run. A DSR is allowed to make more kernel calls, for example it can
signal a condition variable or post to a semaphore.
      </para>
      <para>
Disabling interrupts prevents ISRs from running, but very few parts of
the system disable interrupts and then only for short periods of time.
The main reason for a thread to disable interrupts is to manipulate
some state that is shared with an ISR. For example if a thread needs
to add another buffer to a linked list of free buffers and the ISR may
remove a buffer from this list at any time, the thread would need to
disable interrupts for the few instructions needed to manipulate the
list. If the hardware raises an interrupt at this time, it remains
pending until interrupts are reenabled.
      </para>
      <para>
Analogous to interrupts being disabled or enabled, the kernel has a
scheduler lock. The various kernel functions such as
<function>cyg_mutex_lock</function> and
<function>cyg_semaphore_post</function> will claim the scheduler lock,
manipulate the kernel data structures, and then release the scheduler
lock. If an interrupt results in a DSR being requested and the
scheduler is currently locked, the DSR remains pending. When the
scheduler lock is released any pending DSRs will run. These may post
events to synchronization primitives, causing other higher priority
threads to be woken up.
      </para>
      <para>
For an example, consider the following scenario. The system has a high
priority thread A, responsible for processing some data coming from an
external device. This device will raise an interrupt when data is
available. There are two other threads B and C which spend their time
performing calculations and occasionally writing results to a display
of some sort. This display is a shared resource so a mutex is used to
control access.
      </para>
      <para>
At a particular moment in time thread A is likely to be blocked,
waiting on a semaphore or another synchronization primitive until data
is available. Thread B might be running performing some calculations,
and thread C is runnable waiting for its next timeslice. Interrupts
are enabled, and the scheduler is unlocked because none of the threads
are in the middle of a kernel operation. At this point the device
raises an interrupt. The hardware transfers control to a low-level
interrupt handler provided by eCos which works out exactly which
interrupt occurred, and then the corresponding ISR is run. This ISR
manipulates the hardware as appropriate, determines that there is now
data available, and wants to wake up thread A by posting to the
semaphore. However ISRs are not allowed to call
<function>cyg_semaphore_post</function> directly, so instead the ISR
requests that its associated DSR be run and returns. There are no more
interrupts to be processed, so the kernel next checks for DSRs. One
DSR is pending and the scheduler is currently unlocked, so the DSR can
run immediately and post the semaphore. This will have the effect of
making thread A runnable again, so the scheduler's data structures are
adjusted accordingly. When the DSR returns thread B is no longer the
highest priority runnable thread so it will be suspended, and instead
thread A gains control over the cpu.
      </para>
      <para>
In the above example no kernel data structures were being manipulated
at the exact moment that the interrupt happened. However that cannot
be assumed. Suppose that thread B had finished its current set of
calculations and wanted to write the results to the display. It would
claim the appropriate mutex and manipulate the display. Now suppose
that thread B was timesliced in favour of thread C, and that thread C
also finished its calculations and wanted to write the results to the
display. It would call <function>cyg_mutex_lock</function>. This
kernel call locks the scheduler, examines the current state of the
mutex, discovers that the mutex is already owned by another thread,
suspends the current thread, and switches control to another runnable
thread. Another interrupt happens in the middle of this
<function>cyg_mutex_lock</function> call, causing the ISR to run
immediately. The ISR decides that thread A should be woken up so it
requests that its DSR be run and returns back to the kernel. At this
point there is a pending DSR, but the scheduler is still locked by the
call to <function>cyg_mutex_lock</function> so the DSR cannot run
immediately. Instead the call to <function>cyg_mutex_lock</function>
is allowed to continue, which at some point involves unlocking the
scheduler. The pending DSR can now run, safely post the semaphore, and
thus wake up thread A.
      </para>
      <para>
If the ISR had called <function>cyg_semaphore_post</function> directly
rather than leaving it to a DSR, it is likely that there would have
been some sort of corruption of a kernel data structure. For example
the kernel might have completely lost track of one of the threads, and
that thread would never have run again. The two-level approach to
interrupt handling, ISRs and DSRs, prevents such problems with no
need to disable interrupts.
      </para>
    </refsect1>

<!-- }}} -->
<!-- {{{ Calling contexts               -->

    <refsect1 id="kernel-overview-contexts">
      <title>Calling Contexts</title>
      <para>
eCos defines a number of contexts. Only certain calls are allowed from
inside each context, for example most operations on threads or
synchronization primitives are not allowed from ISR context. The
different contexts are initialization, thread, ISR and DSR.
      </para>
      <para>
When eCos starts up it goes through a number of phases, including
setting up the hardware and invoking C++ static constructors. During
this time interrupts are disabled and the scheduler is locked. When a
configuration includes the kernel package the final operation is a
call to <link
linkend="kernel-schedcontrol"><function>cyg_scheduler_start</function></link>.
At this point interrupts are enabled, the scheduler is unlocked, and
control is transferred to the highest priority runnable thread. If the
configuration also includes the C library package then usually the C
library startup package will have created a thread which will call the
application's <function>main</function> entry point.
      </para>
      <para>
Some application code can also run before the scheduler is started,
and this code runs in initialization context. If the application is
written partly or completely in C++ then the constructors for any
static objects will be run. Alternatively application code can define
a function <function>cyg_user_start</function> which gets called after
any C++ static constructors. This allows applications to be written
entirely in C.
      </para>
      <programlisting width=72>
void
cyg_user_start(void)
{
    /* Perform application-specific initialization here */
}
      </programlisting>
      <para>
It is not necessary for applications to provide a
<function>cyg_user_start</function> function since the system will
provide a default implementation which does nothing.
      </para>
      <para>
Typical operations that are performed from inside static constructors
or <function>cyg_user_start</function> include creating threads,
synchronization primitives, setting up alarms, and registering
application-specific interrupt handlers. In fact for many applications
all such creation operations happen at this time, using statically
allocated data, avoiding any need for dynamic memory allocation or
other overheads.
      </para>
      <para>
Code running in initialization context runs with interrupts disabled
and the scheduler locked. It is not permitted to reenable interrupts
or unlock the scheduler because the system is not guaranteed to be in
a totally consistent state at this point. A consequence is that
initialization code cannot use synchronization primitives such as
<function>cyg_semaphore_wait</function> to wait for an external event.
It is permitted to lock and unlock a mutex: there are no other threads
running so it is guaranteed that the mutex is not yet locked, and
therefore the lock operation will never block; this is useful when
making library calls that may use a mutex internally.
      </para>
      <para>
At the end of the startup sequence the system will call
<function>cyg_scheduler_start</function> and the various threads will
start running. In thread context nearly all of the kernel functions
are available. There may be some restrictions on interrupt-related
operations, depending on the target hardware. For example the hardware
may require that interrupts be acknowledged in the ISR or DSR before
control returns to thread context, in which case
<function>cyg_interrupt_acknowledge</function> should not be called
by a thread.
      </para>
      <para>
At any time the processor may receive an external interrupt, causing
control to be transferred from the current thread. Typically a VSR
provided by eCos will run and determine exactly which interrupt
occurred. Then the VSR will switch to the appropriate ISR, which can
be provided by a HAL package, a device driver, or by the application.
During this time the system is running at ISR context, and most of the
kernel function calls are disallowed. This includes the various
synchronization primitives, so for example an ISR is not allowed to
post to a semaphore to indicate that an event has happened. Usually
the only operations that should be performed from inside an ISR are
ones related to the interrupt subsystem itself, for example masking an
interrupt or acknowledging that an interrupt has been processed. On
SMP systems it is also possible to use spinlocks from ISR context.
      </para>
      <para>
When an ISR returns it can request that the corresponding DSR be run
as soon as it is safe to do so, and that will run in DSR context. This
context is also used for running alarm functions, and threads can
switch temporarily to DSR context by locking the scheduler. Only
certain kernel functions can be called from DSR context, although more
than in ISR context. In particular it is possible to use any
synchronization primitives which cannot block.  These include
<function>cyg_semaphore_post</function>,
<function>cyg_cond_signal</function>,
<function>cyg_cond_broadcast</function>,
<function>cyg_flag_setbits</function>, and
<function>cyg_mbox_tryput</function>. It is not possible to use any
primitives that may block such as
<function>cyg_semaphore_wait</function>,
<function>cyg_mutex_lock</function>, or
<function>cyg_mbox_put</function>. Calling such functions from inside
a DSR may cause the system to hang.
      </para>
      <para>
The specific documentation for the various kernel functions gives more
details about valid contexts.
      </para>
    </refsect1>

<!-- }}} -->
<!-- {{{ Error handling                 -->

    <refsect1 id="kernel-overview-errors">
      <title>Error Handling and Assertions</title>
      <para>
In many APIs each function is expected to perform some validation of
its parameters and possibly of the current state of the system. This
is supposed to ensure that each function is used correctly, and that
application code is not attempting to perform a semaphore operation on
a mutex or anything like that. If an error is detected then a suitable
error code is returned, for example the POSIX function
<function>pthread_mutex_lock</function> can return various error codes
including <literal>EINVAL</literal> and <literal>EDEADLK</literal>.
There are a number of problems with this approach, especially in the
context of deeply embedded systems:
      </para>
      <orderedlist>
        <listitem><para>
Performing these checks inside the mutex lock and all the other
functions requires extra cpu cycles and adds significantly to the code
size. Even if the application is written correctly and only makes
system function calls with sensible arguments and under the right
conditions, these overheads still exist.
        </para></listitem>
        <listitem><para>
Returning an error code is only useful if the calling code detects
these error codes and takes appropriate action. In practice the
calling code will often ignore any errors because the programmer
<emphasis>&ldquo;knows&rdquo;</emphasis> that the function is being
used correctly. If the programmer is mistaken then an error condition
may be detected and reported, but the application continues running
anyway and is likely to fail some time later in mysterious ways.
        </para></listitem>
        <listitem><para>
If the calling code does always check for error codes, that adds yet
more cpu cycles and code size overhead. 
        </para></listitem>
        <listitem><para>
Usually there will be no way to recover from certain errors, so if the
application code detected an error such as <literal>EINVAL</literal>
then all it could do is abort the application somehow.
        </para></listitem>
      </orderedlist>
      <para>
The approach taken within the eCos kernel is different. Functions such
as <function>cyg_mutex_lock</function> will not return an error code.
Instead they contain various assertions, which can be enabled or
disabled. During the development process assertions are normally left
enabled, and the various kernel functions will perform parameter
checks and other system consistency checks. If a problem is detected
then an assertion failure will be reported and the application will be
terminated. In a typical debug session a suitable breakpoint will have
been installed and the developer can now examine the state of the
system and work out exactly what is going on. Towards the end of the
development cycle assertions will be disabled by manipulating
configuration options within the eCos infrastructure package, and all
assertions will be eliminated at compile-time. The assumption is that
by this time the application code has been mostly debugged: the
initial version of the code might have tried to perform a semaphore
operation on a mutex, but any problems like that will have been fixed
some time ago. This approach has a number of advantages:
      </para>
      <orderedlist>
        <listitem><para>
In the final application there will be no overheads for checking
parameters and other conditions. All that code will have been
eliminated at compile-time.
        </para></listitem>
        <listitem><para>
Because the final application will not suffer any overheads, it is
reasonable for the system to do more work during the development
process. In particular the various assertions can test for more error
conditions and more complicated errors. When an error is detected
it is possible to give a text message describing the error rather than
just return an error code.
        </para></listitem>
        <listitem><para>
There is no need for application programmers to handle error codes
returned by various kernel function calls. This simplifies the
application code.
        </para></listitem>
        <listitem><para>
If an error is detected then an assertion failure will be reported
immediately and the application will be halted. There is no
possibility of an error condition being ignored because application
code did not check for an error code.
        </para></listitem>
      </orderedlist>
      <para>
Although none of the kernel functions return an error code, many of
them do return a status condition. For example the function
<function>cyg_semaphore_timed_wait</function> waits until either an
event has been posted to a semaphore, or until a certain number of
clock ticks have occurred. Usually the calling code will need to know
whether the wait operation succeeded or whether a timeout occurred.
<function>cyg_semaphore_timed_wait</function> returns a boolean: a
return value of zero or false indicates a timeout, a non-zero return
value indicates that the wait succeeded.
      </para>
      <para>
In conventional APIs one common error condition is lack of memory.
For example the POSIX function <function>pthread_create</function>
usually has to allocate some memory dynamically for the thread stack
and other per-thread data. If the target hardware does not have enough
memory to meet all demands, or more commonly if the application
contains a memory leak, then there may not be enough memory available
and the function call would fail. The eCos kernel avoids such problems
by never performing any dynamic memory allocation. Instead it is the
responsibility of the application code to provide all the memory
required for kernel data structures and other needs. In the case of
<function>cyg_thread_create</function> this means a
<structname>cyg_thread</structname> data structure to hold the thread
details, and a <type>char</type> array for the thread stack.
      </para>
      <para>
In many applications this approach results in all data structures
being allocated statically rather than dynamically. This has several
advantages. If the application is in fact too large for the target
hardware's memory then there will be an error at link-time rather than
at run-time, making the problem much easier to diagnose. Static
allocation does not involve any of the usual overheads associated with
dynamic allocation, for example there is no need to keep track of the
various free blocks in the system, and it may be possible to eliminate
<function>malloc</function> from the system completely. Problems such
as fragmentation and memory leaks cannot occur if all data is
allocated statically. However, some applications are sufficiently
complicated that dynamic memory allocation is required, and the
various kernel functions do not distinguish between statically and
dynamically allocated memory. It still remains the responsibility of
the calling code to ensure that sufficient memory is available, and
passing null pointers to the kernel will result in assertions or
system failure.
      </para>
    </refsect1>

<!-- }}} -->

  </refentry>

<!-- }}} -->
<!-- {{{ SMP                            -->

  <refentry id="kernel-SMP">

    <refmeta>
    <refentrytitle>SMP Support</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>SMP</refname>
      <refpurpose>Support Symmetric Multiprocessing Systems</refpurpose>
    </refnamediv>

    <refsect1 id="kernel-smp-description">
      <title>Description</title>
      <para>
eCos contains support for limited Symmetric Multi-Processing (SMP).
This is only available on selected architectures and platforms.
The implementation has a number of restrictions on the kind of
hardware supported. These are described in <xref linkend="hal-smp-support">.
    </para>

    <para>
The following sections describe the changes that have been made to the
eCos kernel to support SMP operation.
    </para>
    </refsect1>

    <refsect1 id="kernel-smp-startup">
      <title>System Startup</title>
      <para>
The system startup sequence needs to be somewhat different on an SMP
system, although this is largely transparent to application code. The
main startup takes place on only one CPU, called the primary CPU. All
other CPUs, the secondary CPUs, are either placed in suspended state
at reset, or are captured by the HAL and put into a spin as they start
up. The primary CPU is responsible for copying the DATA segment and
zeroing the BSS (if required), calling HAL variant and platform
initialization routines and invoking constructors. It then calls
<function>cyg_start</function> to enter the application. The
application may then create extra threads and other objects.
      </para>
      <para>
It is only when the application calls
<function>cyg_scheduler_start</function> that the secondary CPUs are
initialized. This routine scans the list of available secondary CPUs
and invokes <function>HAL_SMP_CPU_START</function> to start each
CPU. Finally it calls an internal function
<function>Cyg_Scheduler::start_cpu</function> to enter the scheduler
for the primary CPU.
      </para>
      <para>
Each secondary CPU starts in the HAL, where it completes any per-CPU
initialization before calling into the kernel at
<function>cyg_kernel_cpu_startup</function>. Here it claims the
scheduler lock and calls
<function>Cyg_Scheduler::start_cpu</function>.
      </para>
      <para>
<function>Cyg_Scheduler::start_cpu</function> is common to both the
primary and secondary CPUs. The first thing this code does is to
install an interrupt object for this CPU's inter-CPU interrupt. From
this point on the code is the same as for the single CPU case: an
initial thread is chosen and entered.
      </para>
      <para>
From this point on the CPUs are all equal, eCos makes no further
distinction between the primary and secondary CPUs. However, the
hardware may still distinguish between them as far as interrupt
delivery is concerned.
      </para>
    </refsect1>

    <refsect1 id="kernel-smp-scheduling">
      <title>Scheduling</title>
      <para>
To function correctly an operating system kernel must protect its
vital data structures, such as the run queues, from concurrent
access. In a single CPU system the only concurrent activities to worry
about are asynchronous interrupts. The kernel can easily guard its
data structures against these by disabling interrupts. However, in a
multi-CPU system, this is inadequate since it does not block access by
other CPUs.
      </para>
      <para>
The eCos kernel protects its vital data structures using the scheduler
lock. In single CPU systems this is a simple counter that is
atomically incremented to acquire the lock and decremented to release
it. If the lock is decremented to zero then the scheduler may be
invoked to choose a different thread to run. Because interrupts may
continue to be serviced while the scheduler lock is claimed, ISRs are
not allowed to access kernel data structures, or call kernel routines
that can. Instead all such operations are deferred to an associated
DSR routine that is run during the lock release operation, when the
data structures are in a consistent state.
      </para>
      <para>
By choosing a kernel locking mechanism that does not rely on interrupt
manipulation to protect data structures, it is easier to convert eCos
to SMP than would otherwise be the case. The principal change needed to
make eCos SMP-safe is to convert the scheduler lock into a nestable
spin lock. This is done by adding a spinlock and a CPU id to the
original counter.
      </para>
      <para>
The algorithm for acquiring the scheduler lock is very simple. If the
scheduler lock's CPU id matches the current CPU then it can just increment
the counter and continue. If it does not match, the CPU must spin on
the spinlock, after which it may increment the counter and store its
own identity in the CPU id.
      </para>
      <para>
To release the lock, the counter is decremented. If it goes to zero
the CPU id value must be set to NONE and the spinlock cleared.
      </para>
      <para>
To protect these sequences against interrupts, they must be performed
with interrupts disabled. However, since these are very short code
sequences, they will not have an adverse effect on the interrupt
latency.
      </para>
      <para>
Beyond converting the scheduler lock, further preparing the kernel for
SMP is a relatively minor matter. The main changes are to convert
various scalar housekeeping variables into arrays indexed by CPU
id. These include the current thread pointer, the need_reschedule
flag and the timeslice counter.
      </para>
      <para>
At present only the Multi-Level Queue (MLQ) scheduler is capable of
supporting SMP configurations. The main change made to this scheduler
is to cope with having several threads in execution at the same
time. Running threads are marked with the CPU that they are executing on.
When scheduling a thread, the scheduler skips past any running threads
until it finds a thread that is pending. While not a constant-time
algorithm, as in the single CPU case, this is still deterministic,
since the worst case time is bounded by the number of CPUs in the
system.
      </para>
      <para>
A second change to the scheduler is in the code used to decide when
the scheduler should be called to choose a new thread. The scheduler
attempts to keep the <property>n</property> CPUs running the
<property>n</property> highest priority threads. Since an event or
interrupt on one CPU may require a reschedule on another CPU, there
must be a mechanism for deciding this. The algorithm currently
implemented is very simple. Given a thread that has just been awakened
(or had its priority changed), the scheduler scans the CPUs, starting
with the one it is currently running on, for a current thread that is
of lower priority than the new one. If one is found then a reschedule
interrupt is sent to that CPU and the scan continues, but now using
the current thread of the rescheduled CPU as the candidate thread. In
this way the new thread gets to run as quickly as possible, hopefully
on the current CPU, and the remaining CPUs will pick up the remaining
highest priority threads as a consequence of processing the reschedule
interrupt.
      </para>
      <para>
The final change to the scheduler is in the handling of
timeslicing. Only one CPU receives timer interrupts, although all CPUs
must handle timeslicing. To make this work, the CPU that receives the
timer interrupt decrements the timeslice counter for all CPUs, not
just its own. If the counter for a CPU reaches zero, then it sends a
timeslice interrupt to that CPU. On receiving the interrupt the
destination CPU enters the scheduler and looks for another thread at
the same priority to run. This is somewhat more efficient than
distributing clock ticks to all CPUs, since the interrupt is only
needed when a timeslice occurs.
      </para>
      <para>
All existing synchronization mechanisms work as before in an SMP
system. Additional synchronization mechanisms have been added to
provide explicit synchronization for SMP, in the form of
<link linkend="kernel-spinlocks">spinlocks</link>.
      </para>
    </refsect1>

    <refsect1 id="kernel-smp-interrupts">
      <title>SMP Interrupt Handling</title>
      <para>
The main area where the SMP nature of a system requires special
attention is in device drivers and especially interrupt handling. It
is quite possible for the ISR, DSR and thread components of a device
driver to execute on different CPUs. For this reason it is much more
important that SMP-capable device drivers use the interrupt-related
functions correctly. Typically a device driver would use the driver
API rather than call the kernel directly, but it is unlikely that
anybody would attempt to use a multiprocessor system without the
kernel package.
      </para>
      <para>
Two new functions have been added to the Kernel API
to do <link linkend="kernel-interrupts-smp">interrupt
routing</link>: <function>cyg_interrupt_set_cpu</function> and
<function>cyg_interrupt_get_cpu</function>. Although not currently
supported, special values for the cpu argument may be used in future
to indicate that the interrupt is being routed dynamically or is
CPU-local. Once a vector has been routed to a new CPU, all other
interrupt masking and configuration operations are relative to that
CPU, where relevant.
      </para>

      <para>
There are more details of how interrupts should be handled in SMP
systems in <xref linkend="devapi-smp-support">.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->

<!-- {{{ cyg_thread_create()            -->

  <refentry id="kernel-thread-create">

    <refmeta>
    <refentrytitle>Thread creation</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_create</refname>
      <refpurpose>Create a new thread</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
	<funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;    
	</funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_thread_create</function></funcdef>
          <paramdef>cyg_addrword_t <parameter>sched_info</parameter></paramdef>
          <paramdef>cyg_thread_entry_t* <parameter>entry</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>entry_data</parameter></paramdef>
          <paramdef>char* <parameter>name</parameter></paramdef>
          <paramdef>void* <parameter>stack_base</parameter></paramdef>
          <paramdef>cyg_ucount32 <parameter>stack_size</parameter></paramdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_thread* <parameter>thread</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-thread-create-description"><title>Description</title>
      <para>
The <function>cyg_thread_create</function> function allows application
code and eCos packages to create new threads. In many applications
this only happens during system initialization and all required data
is allocated statically.  However additional threads can be created at
any time, if necessary. A newly created thread is always in suspended
state and will not start running until it has been resumed via a call
to <function>cyg_thread_resume</function>. Also, if threads are
created during system initialization then they will not start running
until the eCos scheduler has been started.
      </para>
      <para>
The <parameter class="function">name</parameter> argument is used
primarily for debugging purposes, making it easier to keep track of
which <structname>cyg_thread</structname> structure is associated with
which application-level thread. The kernel configuration option
<varname>CYGVAR_KERNEL_THREADS_NAME</varname> controls whether or not
this name is actually used.
      </para>
      <para>
On creation each thread is assigned a unique handle, and this will be
stored in the location pointed at by the <parameter
class="function">handle</parameter> argument. Subsequent operations on
this thread including the required
<function>cyg_thread_resume</function> should use this handle to
identify the thread.
      </para>
      <para>
The kernel requires a small amount of space for each thread, in the
form of a <structname>cyg_thread</structname> data structure, to hold
information such as the current state of that thread. To avoid any
need for dynamic memory allocation within the kernel this space has to
be provided by higher-level code, typically in the form of a static
variable. The <parameter class="function">thread</parameter> argument
provides this space.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-create-entry"><title>Thread Entry Point</title>
      <para>
The entry point for a thread takes the form:
      </para>
      <programlisting width=72>
void
thread_entry_function(cyg_addrword_t data)
{
    &hellip;
}
      </programlisting>
      <para>
The second argument to <function>cyg_thread_create</function> is a
pointer to such a function. The third argument <parameter
class="function">entry_data</parameter> is used to pass additional
data to the function. Typically this takes the form of a pointer to
some static data, or a small integer, or <literal>0</literal> if the
thread does not require any additional data.
      </para>
      <para>
If the thread entry function ever returns then this is equivalent to
the thread calling <function>cyg_thread_exit</function>. Even though
the thread will no longer run again, it remains registered with the
scheduler. If the application needs to re-use the
<structname>cyg_thread</structname> data structure then a call to
<function>cyg_thread_delete</function> is required first.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-create-priorities"><title>Thread Priorities</title>
      <para>
The <parameter class="function">sched_info</parameter> argument
provides additional information to the scheduler. The exact details
depend on the scheduler being used. For the bitmap and mlqueue
schedulers it is a small integer, typically in the range 0 to 31, with
0 being the highest priority. The lowest priority is normally used
only by the system's idle thread. The exact number of priorities is
controlled by the kernel configuration option
<varname>CYGNUM_KERNEL_SCHED_PRIORITIES</varname>. 
      </para>
      <para>
It is the responsibility of the application developer to be aware of
the various threads in the system, including those created by eCos
packages, and to ensure that all threads run at suitable priorities.
For threads created by other packages the documentation provided by
those packages should indicate any requirements.
      </para>
      <para>
The functions <function>cyg_thread_set_priority</function>,
<function>cyg_thread_get_priority</function>, and
<function>cyg_thread_get_current_priority</function> can be used to
manipulate a thread's priority.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-create-stack"><title>Stacks and Stack Sizes</title>
      <para>
Each thread needs its own stack for local variables and to keep track
of function calls and returns. Again it is expected that this stack is
provided by the calling code, usually in the form of static data, so
that the kernel does not need any dynamic memory allocation
facilities. <function>cyg_thread_create</function> takes two arguments
related to the stack, a pointer to the base of the stack and the total
size of this stack. On many processors stacks actually descend from the
top down, so the kernel will add the stack size to the base address to
determine the starting location.
      </para>
      <para>
The exact stack size requirements for any given thread depend on a
number of factors. The most important is of course the code that will
be executed in the context of this thread: if this involves significant
nesting of function calls, recursion, or large local arrays, then the
stack size needs to be set to a suitably high value. There are some
architectural issues, for example the number of cpu registers and the
calling conventions will have some effect on stack usage. Also,
depending on the configuration, it is possible that some other code
such as interrupt handlers will occasionally run on the current
thread's stack. This depends in part on configuration options such as
<varname>CYGIMP_HAL_COMMON_INTERRUPTS_USE_INTERRUPT_STACK</varname>
and <varname>CYGSEM_HAL_COMMON_INTERRUPTS_ALLOW_NESTING</varname>.
      </para>
      <para>
Determining an application's actual stack size requirements is the
responsibility of the application developer, since the kernel cannot
know in advance what code a given thread will run. However, the system
does provide some hints about reasonable stack sizes in the form of
two constants: <varname>CYGNUM_HAL_STACK_SIZE_MINIMUM</varname> and
<varname>CYGNUM_HAL_STACK_SIZE_TYPICAL</varname>. These are defined by
the appropriate HAL package. The <varname>MINIMUM</varname> value is
appropriate for a thread that just runs a single function and makes
very simple system calls. Trying to create a thread with a smaller
stack than this is illegal. The <varname>TYPICAL</varname> value is
appropriate for applications where application calls are nested no
more than half a dozen or so levels, and there are no large arrays on
the stack.
      </para>
      <para>
If the stack sizes are not estimated correctly and a stack overflow
occurs, the probable result is some form of memory corruption. This
can be very hard to track down. The kernel does contain some code to
help detect stack overflows, controlled by the configuration option
<varname>CYGFUN_KERNEL_THREADS_STACK_CHECKING</varname>: a small
amount of space is reserved at the stack limit and filled with a
special signature: every time a thread context switch occurs this
signature is checked, and if invalid that is a good indication (but
not absolute proof) that a stack overflow has occurred. This form of
stack checking is enabled by default when the system is built with
debugging enabled. A related configuration option is
<varname>CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT</varname>: enabling
this option means that a thread can call the function
<function>cyg_thread_measure_stack_usage</function> to find out the
maximum stack usage to date. Note that this is not necessarily the
true maximum because, for example, it is possible that in the current
run no interrupt occurred at the worst possible moment.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-create-context"><title>Valid contexts</title>
      <para>
<function>cyg_thread_create</function> may be called during
initialization and from within thread context. It may not be called
from inside a DSR.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-create-example"><title>Example</title>
      <para>
A simple example of thread creation is shown below. This involves
creating five threads, one producer and four consumers or workers. The
threads are created in the system's
<function>cyg_user_start</function>: depending on the configuration it
might be more appropriate to do this elsewhere, for example inside
<function>main</function>.
      </para>
      <programlisting width=72>
#include &lt;cyg/hal/hal_arch.h&gt;
#include &lt;cyg/kernel/kapi.h&gt;

// These numbers depend entirely on your application
#define NUMBER_OF_WORKERS    4
#define PRODUCER_PRIORITY   10
#define WORKER_PRIORITY     11
#define PRODUCER_STACKSIZE  CYGNUM_HAL_STACK_SIZE_TYPICAL
#define WORKER_STACKSIZE    (CYGNUM_HAL_STACK_SIZE_MINIMUM + 1024)

static unsigned char producer_stack[PRODUCER_STACKSIZE];
static unsigned char worker_stacks[NUMBER_OF_WORKERS][WORKER_STACKSIZE];
static cyg_handle_t producer_handle, worker_handles[NUMBER_OF_WORKERS];
static cyg_thread   producer_thread, worker_threads[NUMBER_OF_WORKERS];

static void
producer(cyg_addrword_t data)
{
    &hellip;
}

static void
worker(cyg_addrword_t data)
{
    &hellip;
}

void
cyg_user_start(void)
{
    int i;

    cyg_thread_create(PRODUCER_PRIORITY, &amp;producer, 0, "producer",
                      producer_stack, PRODUCER_STACKSIZE,
                      &amp;producer_handle, &amp;producer_thread);
    cyg_thread_resume(producer_handle);
    for (i = 0; i &lt; NUMBER_OF_WORKERS; i++) {
        cyg_thread_create(WORKER_PRIORITY, &amp;worker, i, "worker",
                          worker_stacks[i], WORKER_STACKSIZE,
                          &amp;(worker_handles[i]), &amp;(worker_threads[i]));
        cyg_thread_resume(worker_handles[i]);
    }
}
      </programlisting>
    </refsect1>


    <refsect1 id="kernel-thread-create-cxx"><title>Thread Entry Points and C++</title>
      <para>
For code written in C++ the thread entry function must be either a
static member function of a class or an ordinary function outside any
class. It cannot be a normal member function of a class because such
member functions take an implicit additional argument
<varname>this</varname>, and the kernel has no way of knowing what
value to use for this argument. One way around this problem is to make
use of a special static member function, for example:
      </para>
      <programlisting width=72>
class fred {
  public:
    void thread_function();
    static void static_thread_aux(cyg_addrword_t);
};

void
fred::static_thread_aux(cyg_addrword_t objptr)
{
    fred* object = static_cast&lt;fred*&gt;(objptr);
    object-&gt;thread_function();
}

static fred instance;

extern "C" void
cyg_start( void )
{
    &hellip;
    cyg_thread_create( &hellip;,
                      &amp;fred::static_thread_aux,
                      static_cast&lt;cyg_addrword_t&gt;(&amp;instance),
                      &hellip;);
    &hellip;
}
      </programlisting>
      <para>
Effectively this uses the <parameter
class="function">entry_data</parameter> argument to
<function>cyg_thread_create</function> to hold the
<varname>this</varname> pointer. Unfortunately this approach does
require the use of some C++ casts, so some of the type safety that can
be achieved when programming in C++ is lost.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Thread info                    -->

  <refentry id="kernel-thread-info">

    <refmeta>
    <refentrytitle>Thread information</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_self</refname>
      <refname>cyg_thread_idle_thread</refname>
      <refname>cyg_thread_get_stack_base</refname>
      <refname>cyg_thread_get_stack_size</refname>
      <refname>cyg_thread_measure_stack_usage</refname>
      <refname>cyg_thread_get_next</refname>
      <refname>cyg_thread_get_info</refname>
      <refname>cyg_thread_get_id</refname>
      <refname>cyg_thread_find</refname>
      <refpurpose>Get basic thread information</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
	<funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;    
	</funcsynopsisinfo>
        <funcprototype>
          <funcdef>cyg_handle_t <function>cyg_thread_self</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_handle_t <function>cyg_thread_idle_thread</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_addrword_t <function>cyg_thread_get_stack_base</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_uint32 <function>cyg_thread_get_stack_size</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_uint32 <function>cyg_thread_measure_stack_usage</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>        
        <funcprototype>
          <funcdef>cyg_bool <function>cyg_thread_get_next</function></funcdef>
          <paramdef>cyg_handle_t *<parameter>thread</parameter></paramdef>
          <paramdef>cyg_uint16 *<parameter>id</parameter></paramdef>
        </funcprototype>        
        <funcprototype>
          <funcdef>cyg_bool <function>cyg_thread_get_info</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
          <paramdef>cyg_uint16 <parameter>id</parameter></paramdef>
          <paramdef>cyg_thread_info *<parameter>info</parameter></paramdef>
        </funcprototype>        
        <funcprototype>
          <funcdef>cyg_uint16 <function>cyg_thread_get_id</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>        
        <funcprototype>
          <funcdef>cyg_handle_t <function>cyg_thread_find</function></funcdef>
          <paramdef>cyg_uint16 <parameter>id</parameter></paramdef>
        </funcprototype>        
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-thread-info-description"><title>Description</title>
      <para>
These functions can be used to obtain some basic information about
various threads in the system. Typically they serve little or no
purpose in real applications, but they can be useful during debugging.
      </para>
      <para>
<function>cyg_thread_self</function> returns a handle corresponding
to the current thread. It will be the same as the value filled in by
<function>cyg_thread_create</function> when the current thread was
created. This handle can then be passed to other functions such as
<function>cyg_thread_get_priority</function>.
      </para>
      <para>
<function>cyg_thread_idle_thread</function> returns the handle
corresponding to the idle thread. This thread is created automatically
by the kernel, so application code has no other way of getting hold of
this information.
      </para>
      <para>
<function>cyg_thread_get_stack_base</function> and
<function>cyg_thread_get_stack_size</function> return information
about a specific thread's stack. The values returned will match the
values passed to <function>cyg_thread_create</function> when this
thread was created.
      </para>
      <para>
<function>cyg_thread_measure_stack_usage</function> is only available
if the configuration option
<varname>CYGFUN_KERNEL_THREADS_STACK_MEASUREMENT</varname> is enabled.
The return value is the maximum number of bytes of stack space used so
far by the specified thread. Note that this should not be considered a
true upper bound, for example it is possible that in the current test
run the specified thread has not yet been interrupted at the deepest
point in the function call graph. Nevertheless the value returned
can give some useful indication of the thread's stack requirements.
      </para>
      <para>
<function>cyg_thread_get_next</function> is used to enumerate all the
current threads in the system. It should be called initially with the
locations pointed to by <parameter>thread</parameter> and
<parameter>id</parameter> set to zero. On return these will be set to
the handle and ID of the first thread. On subsequent calls, these
parameters should be left set to the values returned by the previous
call.  The handle and ID of the next thread in the system will be
installed each time, until a <literal>false</literal> return value
indicates the end of the list.
      </para>
      <para>
<function>cyg_thread_get_info</function> fills in the
<type>cyg_thread_info</type> structure with information about the
thread described by the <parameter>thread</parameter> and
<parameter>id</parameter> arguments. The information returned includes
the thread's handle and id, its state and name, priorities and stack
parameters. If the thread does not exist the function returns
<literal>false</literal>.
    </para>
    <para>
The <type>cyg_thread_info</type> structure is defined as follows by
&lt;<filename class=headerfile>cyg/kernel/kapi.h</filename>&gt;, but may
be extended in future with additional members, and so its size should
not be relied upon:
<programlisting>
typedef struct
{
    <type>cyg_handle_t</type>        <structfield>handle</structfield>;
    <type>cyg_uint16</type>          <structfield>id</structfield>;
    <type>cyg_uint32</type>          <structfield>state</structfield>;
    <type>char</type>                <structfield>*name</structfield>;
    <type>cyg_priority_t</type>      <structfield>set_pri</structfield>;
    <type>cyg_priority_t</type>      <structfield>cur_pri</structfield>;
    <type>cyg_addrword_t</type>      <structfield>stack_base</structfield>;
    <type>cyg_uint32</type>          <structfield>stack_size</structfield>;
    <type>cyg_uint32</type>          <structfield>stack_used</structfield>;
} cyg_thread_info;
</programlisting>
    </para>
    <para>
<function>cyg_thread_get_id</function> returns the unique thread ID for
the thread identified by <parameter>thread</parameter>.
    </para>
    <para>
<function>cyg_thread_find</function> returns a handle for the thread
whose ID is <parameter>id</parameter>. If no such thread exists, a
zero handle is returned.
    </para>
    </refsect1>

    <refsect1 id="kernel-thread-info-context"><title>Valid contexts</title>
      <para>
<function>cyg_thread_self</function> may only be called from thread
context. <function>cyg_thread_idle_thread</function> may be called
from thread or DSR context, but only after the system has been
initialized. <function>cyg_thread_get_stack_base</function>,
<function>cyg_thread_get_stack_size</function> and
<function>cyg_thread_measure_stack_usage</function> may be called
any time after the specified thread has been created, but measuring
stack usage involves looping over at least part of the thread's stack
so this should normally only be done from thread context.
<function>cyg_thread_get_id</function> may be called from any context
as long as the caller can guarantee that the supplied thread handle
remains valid.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-info-examples"><title>Examples</title>
      <para>
A simple example of the use of the
<function>cyg_thread_get_next</function> and
<function>cyg_thread_get_info</function> follows:      
      </para>
      <programlisting width=72>

#include &lt;cyg/kernel/kapi.h&gt;
#include &lt;stdio.h&gt;

void show_threads(void)
{
    cyg_handle_t thread = 0;
    cyg_uint16 id = 0;

    while( cyg_thread_get_next( &amp;thread, &amp;id ) )
    {
        cyg_thread_info info;

        if( !cyg_thread_get_info( thread, id, &amp;info ) )
            break;

        printf("ID: %04x name: %10s pri: %d\n",
                info.id, info.name?info.name:"----", info.set_pri );
    }
}

      </programlisting>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Thread control                 -->

  <refentry id="kernel-thread-control">

    <refmeta>
    <refentrytitle>Thread control</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_yield</refname>
      <refname>cyg_thread_delay</refname>
      <refname>cyg_thread_suspend</refname>
      <refname>cyg_thread_resume</refname>
      <refname>cyg_thread_release</refname>
      <refpurpose>Control whether or not a thread is running</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_thread_yield</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_thread_delay</function></funcdef>
          <paramdef>cyg_tick_count_t <parameter>delay</parameter></paramdef>
        </funcprototype>
        <funcprototype>
           <funcdef>void <function>cyg_thread_suspend</function></funcdef>
           <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
           <funcdef>void <function>cyg_thread_resume</function></funcdef>
           <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
           <funcdef>void <function>cyg_thread_release</function></funcdef>
           <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-thread-control-description">Description</title>
      <para>
These functions provide some control over whether or not a particular
thread can run. Apart from the required use of
<function>cyg_thread_resume</function> to start a newly-created
thread, application code should normally use proper synchronization
primitives such as condition variables or mail boxes.
      </para>
    </refsect1>

    <refsect1><title id="kernel-thread-control-yield">Yield</title>
      <para>
<function>cyg_thread_yield</function> allows a thread to relinquish
control of the processor to some other runnable thread which has the
same priority. This can have no effect on any higher-priority thread
since, if such a thread were runnable, the current thread would have
been preempted in its favour. Similarly it can have no effect on any
lower-priority thread because the current thread will always be run in
preference to those. As a consequence this function is only useful
in configurations with a scheduler that allows multiple threads to run
at the same priority, for example the mlqueue scheduler. If instead
the bitmap scheduler was being used then
<function>cyg_thread_yield()</function> would serve no purpose.
      </para>
      <para>
Even if a suitable scheduler such as the mlqueue scheduler has been
configured, <function>cyg_thread_yield</function> will still rarely
prove useful: instead timeslicing will be used to ensure that all
threads of a given priority get a fair slice of the available
processor time. However it is possible to disable timeslicing via the
configuration option <varname>CYGSEM_KERNEL_SCHED_TIMESLICE</varname>,
in which case <function>cyg_thread_yield</function> can be used to
implement a form of cooperative multitasking.
      </para>
    </refsect1>

    <refsect1><title id="kernel-thread-control-delay">Delay</title>
      <para>
<function>cyg_thread_delay</function> allows a thread to suspend until
the specified number of clock ticks have occurred. For example, if a
value of 1 is used and the system clock runs at a frequency of 100Hz
then the thread will sleep for up to 10 milliseconds. This
functionality depends on the presence of a real-time system clock, as
controlled by the configuration option
<varname>CYGVAR_KERNEL_COUNTERS_CLOCK</varname>.
      </para>
      <para>
If the application requires delays measured in milliseconds or similar
units rather than in clock ticks, some calculations are needed to
convert between these units as described in <xref
linkend="kernel-clocks">. Usually these calculations can be done by
the application developer, or at compile-time. Performing such
calculations prior to every call to
<function>cyg_thread_delay</function> adds unnecessary overhead to the
system. 
      </para>
    </refsect1>

    <refsect1><title id="kernel-thread-control-suspend">Suspend and Resume</title>
      <para>
Associated with each thread is a suspend counter. When a thread is
first created this counter is initialized to 1.
<function>cyg_thread_suspend</function> can be used to increment the
suspend counter, and <function>cyg_thread_resume</function> decrements
it. The scheduler will never run a thread with a non-zero suspend
counter. Therefore a newly created thread will not run until it has
been resumed.
      </para>
      <para>
An occasional problem with the use of suspend and resume functionality
is that a thread gets suspended more times than it is resumed and
hence never becomes runnable again. This can lead to very confusing
behaviour. To help with debugging such problems the kernel provides a
configuration option
<varname>CYGNUM_KERNEL_MAX_SUSPEND_COUNT_ASSERT</varname> which
imposes an upper bound on the number of suspend calls without matching
resumes, with a reasonable default value. This functionality depends
on infrastructure assertions being enabled.
      </para>
    </refsect1>

    <refsect1><title id="kernel-thread-control-release">Releasing a Blocked Thread</title>
      <para>
When a thread is blocked on a synchronization primitive such as a
semaphore or a mutex, or when it is waiting for an alarm to trigger,
it can be forcibly woken up using
<function>cyg_thread_release</function>. Typically this will cause the
affected synchronization primitive to return false, indicating that
the operation was not completed successfully. This function has to be
used with great care, and in particular it should only be used on
threads that have been designed appropriately and check all return
codes. If instead it were to be used on, say, an arbitrary thread that
is attempting to claim a mutex then that thread might not bother to
check the result of the mutex lock operation - usually there would be
no reason to do so. Therefore the thread will now continue running in
the false belief that it has successfully claimed a mutex lock, and
the resulting behaviour is undefined. If the system has been built
with assertions enabled then it is possible that an assertion will
trigger when the thread tries to release the mutex it does not
actually own.
      </para>
      <para>
The main use of <function>cyg_thread_release</function> is in the
POSIX compatibility layer, where it is used in the implementation of
per-thread signals and cancellation handlers.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-control-context"><title>Valid contexts</title>
      <para>
<function>cyg_thread_yield</function> can only be called from thread
context. A DSR must always run to completion and cannot yield the
processor to some thread. <function>cyg_thread_suspend</function>,
<function>cyg_thread_resume</function>, and
<function>cyg_thread_release</function> may be called from thread or
DSR context. 
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Thread termination             -->

  <refentry id="kernel-thread-termination">

    <refmeta>
    <refentrytitle>Thread termination</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_exit</refname>
      <refname>cyg_thread_kill</refname>
      <refname>cyg_thread_delete</refname>
      <refpurpose>Allow threads to terminate</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_thread_exit</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_thread_kill</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_thread_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-thread-termination-description">Description</title>
      <para>
In many embedded systems the various threads are allocated statically,
created during initialization, and never need to terminate. This
avoids any need for dynamic memory allocation or other resource
management facilities. However if a given application does have a
requirement that some threads be created dynamically, must terminate,
and their resources such as the stack be reclaimed, then the kernel
provides the functions <function>cyg_thread_exit</function>,
<function>cyg_thread_kill</function>, and
<function>cyg_thread_delete</function>.
      </para>
      <para>
<function>cyg_thread_exit</function> allows a thread to terminate
itself, thus ensuring that it will not be run again by the scheduler.
However the <structname>cyg_thread</structname> data structure passed
to <function>cyg_thread_create</function> remains in use, and the
handle returned by <function>cyg_thread_create</function> remains
valid. This allows other threads to perform certain operations on the
terminated thread, for example to determine its stack usage via
<function>cyg_thread_measure_stack_usage</function>. When the handle
and <structname>cyg_thread</structname> structure are no longer
required, <function>cyg_thread_delete</function> should be called to
release these resources. If the stack was dynamically allocated then
this should not be freed until after the call to
<function>cyg_thread_delete</function>.
      </para>
      <para>
Alternatively, one thread may use <function>cyg_thread_kill</function>
on another. This has much the same effect as the affected thread
calling <function>cyg_thread_exit</function>. However killing a thread
is generally rather dangerous because no attempt is made to unlock any
synchronization primitives currently owned by that thread or release
any other resources that thread may have claimed. Therefore use of
this function should be avoided, and
<function>cyg_thread_exit</function> is preferred.
<function>cyg_thread_kill</function> cannot be used by a thread to
kill itself.
      </para>
      <para>
<function>cyg_thread_delete</function> should be used on a thread
after it has exited and is no longer required. After this call the
thread handle is no longer valid, and both the
<structname>cyg_thread</structname> structure and the thread stack can
be re-used or freed. If <function>cyg_thread_delete</function> is
invoked on a thread that is still running then there is an implicit
call to <function>cyg_thread_kill</function>. This function returns
<literal>true</literal> if the delete was successful, and
<literal>false</literal> if the delete did not happen. The delete
may not happen for example if the thread being destroyed is a lower
priority thread than the running thread, and will thus not wake up
in order to exit until it is rescheduled.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-termination-context"><title>Valid contexts</title>
      <para>
<function>cyg_thread_exit</function>,
<function>cyg_thread_kill</function> and
<function>cyg_thread_delete</function> can only be called from thread
context. 
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Thread priorities              -->

  <refentry id="kernel-thread-priorities">

    <refmeta>
    <refentrytitle>Thread priorities</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_get_priority</refname>
      <refname>cyg_thread_get_current_priority</refname>
      <refname>cyg_thread_set_priority</refname>
      <refpurpose>Examine and manipulate thread priorities</refpurpose>
    </refnamediv>
    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>cyg_priority_t <function>cyg_thread_get_priority</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_priority_t <function>cyg_thread_get_current_priority</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_thread_set_priority</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
          <paramdef>cyg_priority_t <parameter>priority</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-thread-priorities-description">Description</title>
      <para>
Typical schedulers use the concept of a thread priority to determine
which thread should run next. Exactly what this priority consists of
will depend on the scheduler, but a typical implementation would be a
small integer in the range 0 to 31, with 0 being the highest priority.
Usually only the idle thread will run at the lowest priority. The
exact number of priority levels available depends on the
configuration, typically the option
<varname>CYGNUM_KERNEL_SCHED_PRIORITIES</varname>.
      </para>
      <para>
<function>cyg_thread_get_priority</function> can be used to determine
the priority of a thread, or more correctly the value last used in a
<function>cyg_thread_set_priority</function> call or when the thread
was first created. In some circumstances it is possible that the
thread is actually running at a higher priority. For example, if it
owns a mutex and priority ceilings or inheritance is being used to
prevent priority inversion problems, then the thread's priority may
have been boosted temporarily.
<function>cyg_thread_get_current_priority</function> returns the real
current priority.
      </para>
      <para>
In many applications appropriate thread priorities can be determined
and allocated statically. However, if it is necessary for a thread's
priority to change at run-time then the
<function>cyg_thread_set_priority</function> function provides this
functionality. 
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-priorities-context"><title>Valid contexts</title>
      <para>
<function>cyg_thread_get_priority</function> and
<function>cyg_thread_get_current_priority</function> can be called
from thread or DSR context, although the latter is rarely useful.
<function>cyg_thread_set_priority</function> should also only be
called from thread context.
      </para>
    </refsect1>
  </refentry>

<!-- }}} -->
<!-- {{{ Per-thread data                -->

  <refentry id="kernel-thread-data">

    <refmeta>
    <refentrytitle>Per-thread data</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_new_data_index</refname>
      <refname>cyg_thread_free_data_index</refname>
      <refname>cyg_thread_get_data</refname>
      <refname>cyg_thread_get_data_ptr</refname>
      <refname>cyg_thread_set_data</refname>
      <refpurpose>Manipulate per-thread data</refpurpose>
    </refnamediv>
    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>cyg_ucount32 <function>cyg_thread_new_data_index</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_thread_free_data_index</function></funcdef>
          <paramdef>cyg_ucount32 <parameter>index</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_addrword_t <function>cyg_thread_get_data</function></funcdef>
          <paramdef>cyg_ucount32 <parameter>index</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_addrword_t* <function>cyg_thread_get_data_ptr</function></funcdef>
          <paramdef>cyg_ucount32 <parameter>index</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_thread_set_data</function></funcdef>
          <paramdef>cyg_ucount32 <parameter>index</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>data</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-thread-data-description">Description</title>
      <para>
In some applications and libraries it is useful to have some data that
is specific to each thread. For example, many of the functions in the
POSIX compatibility package return -1 to indicate an error and store
additional information in what appears to be a global variable
<varname>errno</varname>. However, if multiple threads make concurrent
calls into the POSIX library and if <varname>errno</varname> were
really a global variable then a thread would have no way of knowing
whether the current <varname>errno</varname> value really corresponded
to the last POSIX call it made, or whether some other thread had run
in the meantime and made a different POSIX call which updated the
variable. To avoid such confusion <varname>errno</varname> is instead
implemented as a per-thread variable, and each thread has its own
instance.
      </para>
      <para>
The support for per-thread data can be disabled via the configuration
option <varname>CYGVAR_KERNEL_THREADS_DATA</varname>. If enabled, each
<structname>cyg_thread</structname> data structure holds a small array
of words. The size of this array is determined by the configuration
option <varname>CYGNUM_KERNEL_THREADS_DATA_MAX</varname>. When a
thread is created the array is filled with zeroes.
      </para>
      <para>
If an application needs to use per-thread data then it needs an index
into this array which has not yet been allocated to other code. This
index can be obtained by calling
<function>cyg_thread_new_data_index</function>, and then used in
subsequent calls to <function>cyg_thread_get_data</function>.
Typically indices are allocated during system initialization and
stored in static variables. If for some reason a slot in the array is
no longer required and can be re-used then it can be released by calling
<function>cyg_thread_free_data_index</function>.
      </para>
      <para>
The current per-thread data in a given slot can be obtained using
<function>cyg_thread_get_data</function>. This implicitly operates on
the current thread, and its single argument should be an index as
returned by <function>cyg_thread_new_data_index</function>. The
per-thread data can be updated using
<function>cyg_thread_set_data</function>. If a particular item of
per-thread data is needed repeatedly then
<function>cyg_thread_get_data_ptr</function> can be used to obtain the
address of the data, and indirecting through this pointer allows the
data to be examined and updated efficiently.
      </para>
      <para>
Some packages, for example the error and POSIX packages, have
pre-allocated slots in the array of per-thread data. These slots
should not normally be used by application code, and instead slots
should be allocated during initialization by a call to
<function>cyg_thread_new_data_index</function>. If it is known that,
for example, the configuration will never include the POSIX
compatibility package then application code may instead decide to
re-use the slot allocated to that package,
<varname>CYGNUM_KERNEL_THREADS_DATA_POSIX</varname>, but obviously
this does involve a risk of strange and subtle bugs if the
application's requirements ever change.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-data-context"><title>Valid contexts</title>
      <para>
Typically <function>cyg_thread_new_data_index</function> is only
called during initialization, but may also be called at any time in
thread context. <function>cyg_thread_free_data_index</function>, if
used at all, can also be called during initialization or from thread
context. <function>cyg_thread_get_data</function>,
<function>cyg_thread_get_data_ptr</function>, and
<function>cyg_thread_set_data</function> may only be called from
thread context because they implicitly operate on the current thread.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Thread destructors             -->

  <refentry id="kernel-thread-destructors">

    <refmeta>
    <refentrytitle>Thread destructors</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_thread_add_destructor</refname>
      <refname>cyg_thread_rem_destructor</refname>
      <refpurpose>Call functions on thread termination</refpurpose>
    </refnamediv>
    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
typedef void (*<type>cyg_thread_destructor_fn</type>)(<type>cyg_addrword_t</type>);
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_thread_add_destructor</function></funcdef>
          <paramdef>cyg_thread_destructor_fn <parameter>fn</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>data</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_thread_rem_destructor</function></funcdef>
          <paramdef>cyg_thread_destructor_fn <parameter>fn</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>data</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-thread-destructors-description">Description</title>
      <para>
These functions are provided for cases when an application requires a
function to be automatically called when a thread exits. This is often
useful when, for example, freeing up resources allocated by the thread.
      </para>
      <para>
This support must be enabled with the configuration option
<varname>CYGPKG_KERNEL_THREADS_DESTRUCTORS</varname>. When enabled,
you may register a function of type
<type>cyg_thread_destructor_fn</type> to be called on thread
termination using <function>cyg_thread_add_destructor</function>. You
may also provide it with a piece of arbitrary information in the
<parameter>data</parameter> argument which will be passed to the
destructor function <parameter>fn</parameter> when the thread
terminates. If you no longer wish to call a function previously
registered with <function>cyg_thread_add_destructor</function>, you
may call <function>cyg_thread_rem_destructor</function> with the same
parameters used to register the destructor function. Both these
functions return <literal>true</literal> on success and
<literal>false</literal> on failure.
      </para>
      <para>
By default, thread destructors are per-thread, which means that registering
a destructor function only registers that function for the current thread.
In other words, each thread has its own list of destructors.
Alternatively you may disable the configuration option
<varname>CYGSEM_KERNEL_THREADS_DESTRUCTORS_PER_THREAD</varname> in which
case any registered destructors will be run when <emphasis>any</emphasis>
thread exits. In other words, the thread destructor list is global and all
threads have the same destructors.
      </para>
      <para>
There is a limit to the number of destructors which may be registered,
which can be controlled with the
<varname>CYGNUM_KERNEL_THREADS_DESTRUCTORS</varname> configuration
option. Increasing this value will very slightly increase the amount
of memory in use, and when
<varname>CYGSEM_KERNEL_THREADS_DESTRUCTORS_PER_THREAD</varname> is
enabled, the amount of memory used per thread will increase. When the
limit has been reached, <function>cyg_thread_add_destructor</function>
will return <literal>false</literal>.
      </para>
    </refsect1>

    <refsect1 id="kernel-thread-destructors-context"><title>Valid contexts</title>
      <para>
When <varname>CYGSEM_KERNEL_THREADS_DESTRUCTORS_PER_THREAD</varname>
is enabled, these functions must only be called from a thread context
as they implicitly operate on the current thread. When
<varname>CYGSEM_KERNEL_THREADS_DESTRUCTORS_PER_THREAD</varname> is
disabled, these functions may be called from thread or DSR context,
or at initialization time.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Exceptions                     -->

  <refentry id="kernel-exceptions">

    <refmeta>
    <refentrytitle>Exception handling</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_exception_set_handler</refname>
      <refname>cyg_exception_clear_handler</refname>
      <refname>cyg_exception_call_handler</refname>
      <refpurpose>Handle processor exceptions</refpurpose>
    </refnamediv>
    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_exception_set_handler</function></funcdef>
          <paramdef>cyg_code_t <parameter>exception_number</parameter></paramdef>
          <paramdef>cyg_exception_handler_t* <parameter>new_handler</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>new_data</parameter></paramdef>
          <paramdef>cyg_exception_handler_t** <parameter>old_handler</parameter></paramdef>
          <paramdef>cyg_addrword_t* <parameter>old_data</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_exception_clear_handler</function></funcdef>
          <paramdef>cyg_code_t <parameter>exception_number</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_exception_call_handler</function></funcdef>
          <paramdef>cyg_handle_t <parameter>thread</parameter></paramdef>
          <paramdef>cyg_code_t <parameter>exception_number</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>exception_info</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1><title id="kernel-exceptions-description">Description</title>
      <para>
Sometimes code attempts operations that are not legal on the current
hardware, for example dividing by zero, or accessing data through a
pointer that is not properly aligned. When this happens the hardware
will raise an exception. This is very similar to an interrupt, but
happens synchronously with code execution rather than asynchronously
and hence can be tied to the thread that is currently running.
      </para>
      <para>
The exceptions that can be raised depend very much on the hardware,
especially the processor. The corresponding documentation should be
consulted for more details. Alternatively the architectural HAL header
file <filename class="headerfile">hal_intr.h</filename>, or one of the
variant or platform header files it includes, will contain appropriate
definitions. The details of how to handle exceptions, including
whether or not it is possible to recover from them, also depend on the
hardware. 
      </para>
      <para>
Exception handling is optional, and can be disabled through the
configuration option <varname>CYGPKG_KERNEL_EXCEPTIONS</varname>. If
an application has been exhaustively tested and is trusted never to
raise a hardware exception then this option can be disabled and code
and data sizes will be reduced somewhat. If exceptions are left
enabled then the system will provide default handlers for the various
exceptions, but these do nothing. Even the specific type of exception
is ignored, so there is no point in attempting to decode this and
distinguish between say a divide-by-zero and an unaligned access.
If the application installs its own handlers and wants details of the
specific exception being raised then the configuration option
<varname>CYGSEM_KERNEL_EXCEPTIONS_DECODE</varname> has to be enabled.
      </para>
      <para>
An alternative handler can be installed using
<function>cyg_exception_set_handler</function>. This requires a code
for the exception, a function pointer for the new exception handler,
and a parameter to be passed to this handler. Details of the
previously installed exception handler will be returned via the
remaining two arguments, allowing that handler to be reinstated, or
null pointers can be used if this information is of no interest. An
exception handling function should take the following form:
      </para>
      <programlisting width=72>
void
my_exception_handler(cyg_addrword_t data, cyg_code_t exception, cyg_addrword_t info)
{
    &hellip;
}
      </programlisting>
      <para>
The data argument corresponds to the <parameter class="function">new_data</parameter> 
parameter supplied to <function>cyg_exception_set_handler</function>.
The exception code is provided as well, in case a single handler is
expected to support multiple exceptions. The <parameter class="function">info</parameter> 
argument will depend on the hardware and on the specific exception.
      </para>
      <para>
<function>cyg_exception_clear_handler</function> can be used to
restore the default handler, if desired. It is also possible for
software to raise an exception and cause the current handler to be
invoked, but generally this is useful only for testing.
      </para>
      <para>
By default the system maintains a single set of global exception
handlers. However, since exceptions occur synchronously it is
sometimes useful to handle them on a per-thread basis, and have a
different set of handlers for each thread. This behaviour can be
obtained by disabling the configuration option
<varname>CYGSEM_KERNEL_EXCEPTIONS_GLOBAL</varname>. If per-thread
exception handlers are being used then
<function>cyg_exception_set_handler</function> and
<function>cyg_exception_clear_handler</function> apply to the current
thread. Otherwise they apply to the global set of handlers.
      </para>

      <caution><para>
In the current implementation
<function>cyg_exception_call_handler</function> can only be used on
the current thread. There is no support for delivering an exception to
another thread.
      </para></caution>
      <note><para>
Exceptions at the eCos kernel level refer specifically to
hardware-related events such as unaligned accesses to memory or
division by zero. There is no relation with other concepts that are
also known as exceptions, for example the <literal>throw</literal> and
<literal>catch</literal> facilities associated with C++.
      </para></note>

    </refsect1>

    <refsect1 id="kernel-exceptions-context"><title>Valid contexts</title>
      <para>
If the system is configured with a single set of global exception
handlers then
<function>cyg_exception_set_handler</function> and
<function>cyg_exception_clear_handler</function> may be called during
initialization or from thread context. If instead per-thread exception
handlers are being used then it is not possible to install new
handlers during initialization because the functions operate
implicitly on the current thread, so they can only be called from
thread context. <function>cyg_exception_call_handler</function> should
only be called from thread context.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Counters                       -->

  <refentry id="kernel-counters">

    <refmeta>
    <refentrytitle>Counters</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_counter_create</refname>
      <refname>cyg_counter_delete</refname>
      <refname>cyg_counter_current_value</refname>
      <refname>cyg_counter_set_value</refname>
      <refname>cyg_counter_tick</refname>
      <refpurpose>Count event occurrences</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_counter_create</function></funcdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_counter* <parameter>counter</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_counter_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>counter</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_tick_count_t <function>cyg_counter_current_value</function></funcdef>
          <paramdef>cyg_handle_t <parameter>counter</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_counter_set_value</function></funcdef>
          <paramdef>cyg_handle_t <parameter>counter</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>new_value</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_counter_tick</function></funcdef>
          <paramdef>cyg_handle_t <parameter>counter</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-counters-description"><title>Description</title>
      <para>
Kernel counters can be used to keep track of how many times a
particular event has occurred. Usually this event is an external
signal of some sort. The most common use of counters is in the
implementation of clocks, but they can be useful with other event
sources as well. Application code can attach <link
linkend="kernel-alarms">alarms</link> to counters, causing a function
to be called when some number of events have occurred.
      </para>
      <para>
A new counter is initialized by a call to
<function>cyg_counter_create</function>. The first argument is used to
return a handle to the new counter which can be used for subsequent
operations. The second argument allows the application to provide the
memory needed for the object, thus eliminating any need for dynamic
memory allocation within the kernel. If a counter is no longer
required and does not have any alarms attached then
<function>cyg_counter_delete</function> can be used to release the
resources, allowing the <structname>cyg_counter</structname> data
structure to be re-used.
      </para>
      <para>
Initializing a counter does not automatically attach it to any source
of events. Instead some other code needs to call
<function>cyg_counter_tick</function> whenever a suitable event
occurs, which will cause the counter to be incremented and may cause
alarms to trigger. The current value associated with the counter can
be retrieved using <function>cyg_counter_current_value</function> and
modified with <function>cyg_counter_set_value</function>. Typically
the latter function is only used during initialization, for example to
set a clock to wallclock time, but it can be used to reset a counter
if necessary. However <function>cyg_counter_set_value</function> will
never trigger any alarms. A newly initialized counter has a starting
value of 0.
      </para>
      <para>
The kernel provides two different implementations of counters. The
default is <varname>CYGIMP_KERNEL_COUNTERS_SINGLE_LIST</varname> which
stores all alarms attached to the counter on a single list. This is
simple and usually efficient. However when a tick occurs the kernel
code has to traverse this list, typically at DSR level, so if there
are a significant number of alarms attached to a single counter this
will affect the system's dispatch latency. The alternative
implementation, <varname>CYGIMP_KERNEL_COUNTERS_MULTI_LIST</varname>,
stores each alarm in one of an array of lists such that at most one of
the lists needs to be searched per clock tick. This involves extra
code and data, but can improve real-time responsiveness in some
circumstances. Another configuration option that is relevant here
is <varname>CYGIMP_KERNEL_COUNTERS_SORT_LIST</varname>, which is
disabled by default. This provides a trade off between doing work
whenever a new alarm is added to a counter and doing work whenever a
tick occurs. It is application-dependent which of these is more
appropriate.
      </para>
    </refsect1>

    <refsect1 id="kernel-counters-context"><title>Valid contexts</title>
      <para>
<function>cyg_counter_create</function> is typically called during
system initialization but may also be called in thread context.
Similarly <function>cyg_counter_delete</function> may be called during
initialization or in thread context.
<function>cyg_counter_current_value</function>,
<function>cyg_counter_set_value</function> and
<function>cyg_counter_tick</function> may be called during
initialization or from thread or DSR context. In fact,
<function>cyg_counter_tick</function> is usually called from inside a
DSR in response to an external event of some sort.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Clocks                         -->

  <refentry id="kernel-clocks">

    <refmeta>
    <refentrytitle>Clocks</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_clock_create</refname>
      <refname>cyg_clock_delete</refname>
      <refname>cyg_clock_to_counter</refname>
      <refname>cyg_clock_set_resolution</refname>
      <refname>cyg_clock_get_resolution</refname>
      <refname>cyg_real_time_clock</refname>
      <refname>cyg_current_time</refname>
      <refpurpose>Provide system clocks</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_clock_create</function></funcdef>
          <paramdef>cyg_resolution_t <parameter>resolution</parameter></paramdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_clock* <parameter>clock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_clock_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>clock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_clock_to_counter</function></funcdef>
          <paramdef>cyg_handle_t <parameter>clock</parameter></paramdef>
          <paramdef>cyg_handle_t* <parameter>counter</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_clock_set_resolution</function></funcdef>
          <paramdef>cyg_handle_t <parameter>clock</parameter></paramdef>
          <paramdef>cyg_resolution_t <parameter>resolution</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_resolution_t <function>cyg_clock_get_resolution</function></funcdef>
          <paramdef>cyg_handle_t <parameter>clock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_handle_t <function>cyg_real_time_clock</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_tick_count_t <function>cyg_current_time</function></funcdef>
          <void>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-clocks-description"><title>Description</title>
      <para>
In the eCos kernel clock objects are a special form of <link
linkend="kernel-counters">counter</link> objects. They are attached to
a specific type of hardware, clocks that generate ticks at very
specific time intervals, whereas counters can be used with any event
source.
      </para>
      <para>
In a default configuration the kernel provides a single clock
instance, the real-time clock. This gets used for timeslicing and for
operations that involve a timeout, for example
<function>cyg_semaphore_timed_wait</function>. If this functionality
is not required it can be removed from the system using the
configuration option <varname>CYGVAR_KERNEL_COUNTERS_CLOCK</varname>.
Otherwise the real-time clock can be accessed by a call to
<function>cyg_real_time_clock</function>, allowing applications to
attach alarms, and the current counter value can be obtained using
<function>cyg_current_time</function>.
      </para>
      <para>
Applications can create and destroy additional clocks if desired,
using <function>cyg_clock_create</function> and
<function>cyg_clock_delete</function>. The first argument to
<function>cyg_clock_create</function> specifies the
<link linkend="kernel-clocks-resolution">resolution</link> this clock
will run at. The second argument is used to return a handle for this
clock object, and the third argument provides the kernel with the
memory needed to hold this object. This clock will not actually tick
by itself. Instead it is the responsibility of application code to
initialize a suitable hardware timer to generate interrupts at the
appropriate frequency, install an interrupt handler for this, and
call <function>cyg_counter_tick</function> from inside the DSR.
Associated with each clock is a kernel counter, a handle for which can
be obtained using <function>cyg_clock_to_counter</function>.
      </para>
    </refsect1>

    <refsect1 id="kernel-clocks-resolution"><title>Clock Resolutions and Ticks</title>
      <para>
At the kernel level all clock-related operations including delays,
timeouts and alarms work in units of clock ticks, rather than in units
of seconds or milliseconds. If the calling code, whether the
application or some other package, needs to operate using units such
as milliseconds then it has to convert from these units to clock
ticks.
      </para>
      <para>
The main reason for this is that it accurately reflects the
hardware: calling something like <function>nanosleep</function> with a
delay of ten nanoseconds will not work as intended on any real
hardware because timer interrupts simply will not happen that
frequently; instead calling <function>cyg_thread_delay</function> with
the equivalent delay of 0 ticks gives a much clearer indication that
the application is attempting something inappropriate for the target
hardware. Similarly, passing a delay of five ticks to
<function>cyg_thread_delay</function> makes it fairly obvious that
the current thread will be suspended for somewhere between four and
five clock periods, as opposed to passing 50000000 to
<function>nanosleep</function> which suggests a granularity that is
not actually provided.
      </para>
      <para>
A secondary reason is that conversion between clock ticks and units
such as milliseconds can be somewhat expensive, and whenever possible
should be done at compile-time or by the application developer rather
than at run-time. This saves code size and cpu cycles.
      </para>
      <para>
The information needed to perform these conversions is the clock
resolution. This is a structure with two fields, a dividend and a
divisor, and specifies the number of nanoseconds between clock ticks.
For example a clock that runs at 100Hz will have 10 milliseconds
between clock ticks, or 10000000 nanoseconds. The ratio between the
resolution's dividend and divisor will therefore be 10000000 to 1, and
typical values for these might be 1000000000 and 100. If the clock
runs at a different frequency, say 60Hz, the numbers could be
1000000000 and 60 respectively. Given a delay in nanoseconds, this can
be converted to clock ticks by multiplying with the divisor and
then dividing by the dividend. For example a delay of 50 milliseconds
corresponds to 50000000 nanoseconds, and with a clock frequency of
100Hz this can be converted to
((50000000&nbsp;*&nbsp;100)&nbsp;/&nbsp;1000000000)&nbsp;=&nbsp;5
clock ticks. Given the large numbers involved this arithmetic normally
has to be done using 64-bit precision and the
<type>long&nbsp;long</type> data type, but allows code to run on
hardware with unusual clock frequencies.
      </para>
      <para>
The default frequency for the real-time clock on any platform is
usually about 100Hz, but platform-specific documentation should be
consulted for this information. Usually it is possible to override
this default by configuration options, but again this depends on the
capabilities of the underlying hardware. The resolution for any clock
can be obtained using <function>cyg_clock_get_resolution</function>.
For clocks created by application code, there is also a function
<function>cyg_clock_set_resolution</function>. This does not affect
the underlying hardware timer in any way, it merely updates the
information that will be returned in subsequent calls to
<function>cyg_clock_get_resolution</function>: changing the actual
underlying clock frequency will require appropriate manipulation of
the timer hardware.
      </para>
    </refsect1>

    <refsect1 id="kernel-clocks-context"><title>Valid contexts</title>
      <para>
<function>cyg_clock_create</function> is usually only called during
system initialization (if at all), but may also be called from thread
context. The same applies to <function>cyg_clock_delete</function>.
The remaining functions may be called during initialization, from
thread context, or from DSR context, although it should be noted that
there is no locking between
<function>cyg_clock_get_resolution</function> and
<function>cyg_clock_set_resolution</function> so theoretically it is
possible that the former returns an inconsistent data structure.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Alarms                         -->

  <refentry id="kernel-alarms">

    <refmeta>
    <refentrytitle>Alarms</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_alarm_create</refname>
      <refname>cyg_alarm_delete</refname>
      <refname>cyg_alarm_initialize</refname>
      <refname>cyg_alarm_enable</refname>
      <refname>cyg_alarm_disable</refname>
      <refpurpose>Run an alarm function when a number of events have occurred</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_alarm_create</function></funcdef>
          <paramdef>cyg_handle_t <parameter>counter</parameter></paramdef>
          <paramdef>cyg_alarm_t* <parameter>alarmfn</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>data</parameter></paramdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_alarm* <parameter>alarm</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_alarm_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>alarm</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_alarm_initialize</function></funcdef>
          <paramdef>cyg_handle_t <parameter>alarm</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>trigger</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>interval</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_alarm_enable</function></funcdef>
          <paramdef>cyg_handle_t <parameter>alarm</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_alarm_disable</function></funcdef>
          <paramdef>cyg_handle_t <parameter>alarm</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-alarms-description"><title>Description</title>
      <para>
Kernel alarms are used together with counters and allow for action to
be taken when a certain number of events have occurred. If the counter
is associated with a clock then the alarm action happens when the
appropriate number of clock ticks have occurred, in other words after
a certain period of time.
      </para>
      <para>
Setting up an alarm involves a two-step process. First the alarm must
be created with a call to <function>cyg_alarm_create</function>. This
takes five arguments. The first identifies the counter to which the
alarm should be attached. If the alarm should be attached to the
system's real-time clock then <function>cyg_real_time_clock</function>
and <function>cyg_clock_to_counter</function> can be used to get hold
of the appropriate handle. The next two arguments specify the action
to be taken when the alarm is triggered, in the form of a function
pointer and some data. This function should take the form:
      </para>
      <programlisting width=72>
void
alarm_handler(cyg_handle_t alarm, cyg_addrword_t data)
{
    &hellip;
}
      </programlisting>
      <para>
The data argument passed to the alarm function corresponds to the
third argument passed to <function>cyg_alarm_create</function>.
The fourth argument to <function>cyg_alarm_create</function> is used
to return a handle to the newly-created alarm object, and the final
argument provides the memory needed for the alarm object and thus
avoids any need for dynamic memory allocation within the kernel.
      </para>
      <para>
Once an alarm has been created a further call to
<function>cyg_alarm_initialize</function> is needed to activate it.
The first argument specifies the alarm. The second argument indicates
the number of events, for example clock ticks, that need to occur
before the alarm triggers. If the third argument is 0 then the alarm
will only trigger once. A non-zero value specifies that the alarm
should trigger repeatedly, with an interval of the specified number of
events.
      </para>
      <para>
Alarms can be temporarily disabled and reenabled using
<function>cyg_alarm_disable</function> and
<function>cyg_alarm_enable</function>. Alternatively another call to
<function>cyg_alarm_initialize</function> can be used to modify the
behaviour of an existing alarm. If an alarm is no longer required then
the associated resources can be released using
<function>cyg_alarm_delete</function>. 
      </para>
      <para>
The alarm function is invoked when a counter tick occurs, in other
words when there is a call to <function>cyg_counter_tick</function>,
and will happen in the same context. If the alarm is associated with
the system's real-time clock then this will be DSR context, following
a clock interrupt. If the alarm is associated with some other
application-specific counter then the details will depend on how that
counter is updated.
      </para>
      <para>
If two or more alarms are registered for precisely the same counter tick,
the order of execution of the alarm functions is unspecified.
      </para>
    </refsect1>

    <refsect1 id="kernel-alarms-context"><title>Valid contexts</title>
      <para>
<function>cyg_alarm_create</function> is typically called during
system initialization but may also be called in thread context. The
same applies to <function>cyg_alarm_delete</function>.
<function>cyg_alarm_initialize</function>,
<function>cyg_alarm_disable</function> and
<function>cyg_alarm_enable</function> may be called during
initialization or from thread or DSR context, but
<function>cyg_alarm_enable</function> and
<function>cyg_alarm_initialize</function> may be expensive operations
and should only be called when necessary.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Mutexes                        -->

  <refentry id="kernel-mutexes">

    <refmeta>
    <refentrytitle>Mutexes</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_mutex_init</refname>
      <refname>cyg_mutex_destroy</refname>
      <refname>cyg_mutex_lock</refname>
      <refname>cyg_mutex_trylock</refname>
      <refname>cyg_mutex_unlock</refname>
      <refname>cyg_mutex_release</refname>
      <refname>cyg_mutex_set_ceiling</refname>
      <refname>cyg_mutex_set_protocol</refname>
      <refpurpose>Synchronization primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_init</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_destroy</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mutex_lock</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mutex_trylock</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_unlock</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_release</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_set_ceiling</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
          <paramdef>cyg_priority_t <parameter>priority</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mutex_set_protocol</function></funcdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
          <paramdef>enum cyg_mutex_protocol <parameter>protocol</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-mutexes-description"><title>Description</title>
      <para>
The purpose of mutexes is to let threads share resources safely. If
two or more threads attempt to manipulate a data structure with no
locking between them then the system may run for quite some time
without apparent problems, but sooner or later the data structure will
become inconsistent and the application will start behaving strangely
and is quite likely to crash. The same can apply even when
manipulating a single variable or some other resource. For example,
consider:
      </para>
<programlisting width=72>
static volatile int counter = 0;

void
process_event(void)
{
    &hellip;

    counter++;
}
</programlisting>
      <para>
Assume that after a certain period of time <varname>counter</varname>
has a value of 42, and two threads A and B running at the same
priority call <function>process_event</function>. Typically thread A
will read the value of <varname>counter</varname> into a register,
increment this register to 43, and write this updated value back to
memory. Thread B will do the same, so usually
<varname>counter</varname> will end up with a value of 44. However if
thread A is timesliced after reading the old value 42 but before
writing back 43, thread B will still read back the old value and will
also write back 43. The net result is that the counter only gets
incremented once, not twice, which depending on the application may
prove disastrous.
      </para>
      <para>
Sections of code like the above which involve manipulating shared data
are generally known as critical regions. Code should claim a lock
before entering a critical region and release the lock when leaving.
Mutexes provide an appropriate synchronization primitive for this.
      </para>
      <programlisting width=72>
static volatile int counter = 0;
static cyg_mutex_t  lock;

void
process_event(void)
{
    &hellip;

    cyg_mutex_lock(&amp;lock);
    counter++;
    cyg_mutex_unlock(&amp;lock);
}
      </programlisting>
      <para>
A mutex must be initialized before it can be used, by calling
<function>cyg_mutex_init</function>. This takes a pointer to a
<structname>cyg_mutex_t</structname> data structure which is typically
statically allocated, and may be part of a larger data structure. If a
mutex is no longer required and there are no threads waiting on it
then <function>cyg_mutex_destroy</function> can be used.
      </para>
      <para>
The main functions for using a mutex are
<function>cyg_mutex_lock</function> and
<function>cyg_mutex_unlock</function>. In normal operation
<function>cyg_mutex_lock</function> will return success after claiming
the mutex lock, blocking if another thread currently owns the mutex.
However the lock operation may fail if other code calls
<function>cyg_mutex_release</function> or
<function>cyg_thread_release</function>, so if these functions may get
used then it is important to check the return value. The current owner
of a mutex should call <function>cyg_mutex_unlock</function> when a
lock is no longer required. This operation must be performed by the
owner, not by another thread.
      </para>
      <para>
<function>cyg_mutex_trylock</function> is a variant of
<function>cyg_mutex_lock</function> that will always return
immediately, returning success or failure as appropriate. This
function is rarely useful. Typical code locks a mutex just before
entering a critical region, so if the lock cannot be claimed then
there may be nothing else for the current thread to do. Use of this
function may also cause a form of priority inversion if the owner
runs at a lower priority, because the priority inheritance code
will not be triggered. Instead the current thread continues running,
preventing the owner from getting any cpu time, completing the
critical region, and releasing the mutex.
      </para>
      <para>
<function>cyg_mutex_release</function> can be used to wake up all
threads that are currently blocked inside a call to
<function>cyg_mutex_lock</function> for a specific mutex. These lock
calls will return failure. The current mutex owner is not affected.
      </para>
    </refsect1>

    <refsect1 id="kernel-mutexes-priority-inversion"><title>Priority Inversion</title>
      <para>
The use of mutexes gives rise to a problem known as priority
inversion. In a typical scenario this requires three threads A, B, and
C, running at high, medium and low priority respectively. Thread A and
thread B are temporarily blocked waiting for some event, so thread C
gets a chance to run, needs to enter a critical region, and locks
a mutex. At this point threads A and B are woken up - the exact order
does not matter. Thread A needs to claim the same mutex but has to
wait until C has left the critical region and can release the mutex.
Meanwhile thread B works on something completely different and can
continue running without problems. Because thread C is running at a lower
priority than B it will not get a chance to run until B blocks for
some reason, and hence thread A cannot run either. The overall effect
is that a high-priority thread A cannot proceed because of a lower
priority thread B, and priority inversion has occurred.
      </para>
      <para>
In simple applications it may be possible to arrange the code such
that priority inversion cannot occur, for example by ensuring that a
given mutex is never shared by threads running at different priority
levels. However this may not always be possible even at the
application level. In addition mutexes may be used internally by
underlying code, for example the memory allocation package, so careful
analysis of the whole system would be needed to be sure that priority
inversion cannot occur. Instead it is common practice to use one of
two techniques: priority ceilings and priority inheritance.
      </para>
      <para>
Priority ceilings involve associating a priority with each mutex.
Usually this will match the highest priority thread that will ever
lock the mutex. When a thread running at a lower priority makes a
successful call to <function>cyg_mutex_lock</function> or
<function>cyg_mutex_trylock</function> its priority will be boosted to
that of the mutex. For example, given the previous example the
priority associated with the mutex would be that of thread A, so for
as long as it owns the mutex thread C will run in preference to thread
B. When C releases the mutex its priority drops to the normal value
again, allowing A to run and claim the mutex. Setting the
priority for a mutex involves a call to
<function>cyg_mutex_set_ceiling</function>, which is typically called
during initialization. It is possible to change the ceiling
dynamically but this will only affect subsequent lock operations, not
the current owner of the mutex.
      </para>
      <para>
Priority ceilings are very suitable for simple applications, where for
every thread in the system it is possible to work out which mutexes
will be accessed. For more complicated applications this may prove
difficult, especially if thread priorities change at run-time. An
additional problem occurs for any mutexes outside the application, for
example used internally within eCos packages. A typical eCos package
will be unaware of the details of the various threads in the system,
so it will have no way of setting suitable ceilings for its internal
mutexes. If those mutexes are not exported to application code then 
using priority ceilings may not be viable. The kernel does provide a
configuration option
<varname>CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_DEFAULT_PRIORITY</varname>
that can be used to set the default priority ceiling for all mutexes,
which may prove sufficient.
      </para>
      <para>
The alternative approach is to use priority inheritance: if a thread
calls <function>cyg_mutex_lock</function> for a mutex that is
currently owned by a lower-priority thread, then the owner will have
its priority raised to that of the current thread. Often this is more
efficient than priority ceilings because priority boosting only
happens when necessary, not for every lock operation, and the required
priority is determined at run-time rather than by static analysis.
However there are complications when multiple threads running at
different priorities try to lock a single mutex, or when the current
owner of a mutex then tries to lock additional mutexes, and this makes
the implementation significantly more complicated than priority
ceilings. 
      </para>
      <para>
There are a number of configuration options associated with priority
inversion. First, if after careful analysis it is known that priority
inversion cannot arise then the component
<varname>CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL</varname>
can be disabled. More commonly this component will be enabled, and one
of either
<varname>CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT</varname>
or
<varname>CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING</varname>
will be selected, so that one of the two protocols is available for
all mutexes. It is possible to select multiple protocols, so that some
mutexes can have priority ceilings while others use priority
inheritance or no priority inversion protection at all. Obviously this
flexibility will add to the code size and to the cost of mutex
operations. The default for all mutexes will be controlled by
<varname>CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_DEFAULT</varname>,
and can be changed at run-time using
<function>cyg_mutex_set_protocol</function>. 
      </para>
      <para>
Priority inversion problems can also occur with other synchronization
primitives such as semaphores. For example there could be a situation
where a high-priority thread A is waiting on a semaphore, a
low-priority thread C needs to do just a little bit more work before
posting the semaphore, but a medium priority thread B is running and
preventing C from making progress. However a semaphore does not have
the concept of an owner, so there is no way for the system to know
that it is thread C which would next post to the semaphore. Hence
there is no way for the system to boost the priority of C
automatically and prevent the priority inversion. Instead situations
like this have to be detected by application developers and
appropriate precautions have to be taken, for example making sure that
all the threads run at suitable priorities at all times.
      </para>
      <warning><para>
The current implementation of priority inheritance within the eCos
kernel does not handle certain exceptional circumstances completely
correctly. Problems will only arise if a thread owns one mutex,
then attempts to claim another mutex, and there are other threads
attempting to lock these same mutexes. Although the system will
continue running, the current owners of the various mutexes involved
may not run at the priority they should. This situation never arises
in typical code because a mutex will only be locked for a small
critical region, and there is no need to manipulate other shared resources
inside this region. A more complicated implementation of priority
inheritance is possible but would add significant overhead and certain
operations would no longer be deterministic.
      </para></warning>
      <warning><para>
Support for priority ceilings and priority inheritance is not
implemented for all schedulers. In particular neither priority
ceilings nor priority inheritance are currently available for the
bitmap scheduler.
      </para></warning>
    </refsect1>

    <refsect1 id="kernel-mutexes-alternatives"><title>Alternatives</title>
      <para>
In nearly all circumstances, if two or more threads need to share some
data then protecting this data with a mutex is the correct thing to
do. Mutexes are the only primitive that combine a locking mechanism
and protection against priority inversion problems. However this
functionality is achieved at a cost, and in exceptional circumstances
such as an application's most critical inner loop it may be desirable
to use some other means of locking.
      </para>
      <para>
When a critical region is very very small it is possible to lock the
scheduler, thus ensuring that no other thread can run until the
scheduler is unlocked again. This is achieved with calls to <link
linkend="kernel-schedcontrol"><function>cyg_scheduler_lock</function></link>
and <function>cyg_scheduler_unlock</function>. If the critical region
is sufficiently small then this can actually improve both performance
and dispatch latency because <function>cyg_mutex_lock</function> also
locks the scheduler for a brief period of time. This approach will not
work on SMP systems because another thread may already be running on a
different processor and accessing the critical region.
      </para>
      <para>
Another way of avoiding the use of mutexes is to make sure that all
threads that access a particular critical region run at the same
priority and configure the system with timeslicing disabled
(<varname>CYGSEM_KERNEL_SCHED_TIMESLICE</varname>). Without
timeslicing a thread can only be preempted by a higher-priority one,
or if it performs some operation that can block. This approach
requires that none of the operations in the critical region can block,
so for example it is not legal to call
<function>cyg_semaphore_wait</function>. It is also vulnerable to
any changes in the configuration or to the various thread priorities:
any such changes may now have unexpected side effects. It will not
work on SMP systems.
      </para>
    </refsect1>

    <refsect1 id="kernel-mutexes-recursive"><title>Recursive Mutexes</title>
      <para>
The implementation of mutexes within the eCos kernel does not support
recursive locks. If a thread has locked a mutex and then attempts to
lock the mutex again, typically as a result of some recursive call in
a complicated call graph, then either an assertion failure will be
reported or the thread will deadlock. This behaviour is deliberate.
When a thread has just locked a mutex associated with some data
structure, it can assume that that data structure is in a consistent
state. Before unlocking the mutex again it must ensure that the data
structure is again in a consistent state. Recursive mutexes allow a
thread to make arbitrary changes to a data structure, then in a
recursive call lock the mutex again while the data structure is still
inconsistent. The net result is that code can no longer make any
assumptions about data structure consistency, which defeats the
purpose of using mutexes.
      </para>
    </refsect1>

    <refsect1 id="kernel-mutexes-context"><title>Valid contexts</title>
      <para>
<function>cyg_mutex_init</function>,
<function>cyg_mutex_set_ceiling</function> and
<function>cyg_mutex_set_protocol</function> are normally called during
initialization but may also be called from thread context. The
remaining functions should only be called from thread context. Mutexes
serve as a mutual exclusion mechanism between threads, and cannot be
used to synchronize between threads and the interrupt handling
subsystem. If a critical region is shared between a thread and a DSR
then it must be protected using <link
linkend="kernel-schedcontrol"><function>cyg_scheduler_lock</function></link>
and <function>cyg_scheduler_unlock</function>. If a critical region is
shared between a thread and an ISR, it must be protected by disabling
or masking interrupts. Obviously these operations must be used with
care because they can affect dispatch and interrupt latencies.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Condition variables            -->

  <refentry id="kernel-condition-variables">

    <refmeta>
    <refentrytitle>Condition Variables</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_cond_init</refname>
      <refname>cyg_cond_destroy</refname>
      <refname>cyg_cond_wait</refname>
      <refname>cyg_cond_timed_wait</refname>
      <refname>cyg_cond_signal</refname>
      <refname>cyg_cond_broadcast</refname>
      <refpurpose>Synchronization primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_cond_init</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
          <paramdef>cyg_mutex_t* <parameter>mutex</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_cond_destroy</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_cond_wait</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_cond_timed_wait</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>abstime</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_cond_signal</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_cond_broadcast</function></funcdef>
          <paramdef>cyg_cond_t* <parameter>cond</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-condition-variables-description"><title>Description</title>

      <para>
Condition variables are used in conjunction with mutexes to implement
long-term waits for some condition to become true. For example
consider a set of functions that control access to a pool of
resources:
      </para>

      <programlisting width=72>

cyg_mutex_t res_lock;
res_t res_pool[RES_MAX];
int res_count = RES_MAX;

void res_init(void)
{
    cyg_mutex_init(&amp;res_lock);
    &lt;fill pool with resources&gt;
}

res_t res_allocate(void)
{
    res_t res;

    cyg_mutex_lock(&amp;res_lock);               // lock the mutex

    if( res_count == 0 )                     // check for free resource
        res = RES_NONE;                      // return RES_NONE if none
    else
    {
        res_count--;                         // allocate a resource
        res = res_pool[res_count];
    }

    cyg_mutex_unlock(&amp;res_lock);             // unlock the mutex

    return res;
}

void res_free(res_t res)
{
    cyg_mutex_lock(&amp;res_lock);               // lock the mutex

    res_pool[res_count] = res;               // free the resource
    res_count++;

    cyg_mutex_unlock(&amp;res_lock);             // unlock the mutex
}
      </programlisting>

      <para>
These routines use the variable <varname>res_count</varname> to keep
track of the resources available. If there are none then
<function>res_allocate</function> returns <literal>RES_NONE</literal>,
which the caller must check for and take appropriate error handling
actions.
      </para>

      <para>
Now suppose that we do not want to return
<literal>RES_NONE</literal> when there are no resources, but want to
wait for one to become available. This is where a condition variable
can be used:
      </para>

      <programlisting width=72>

cyg_mutex_t res_lock;
cyg_cond_t res_wait;
res_t res_pool[RES_MAX];
int res_count = RES_MAX;

void res_init(void)
{
    cyg_mutex_init(&amp;res_lock);
    cyg_cond_init(&amp;res_wait, &amp;res_lock);
    &lt;fill pool with resources&gt;
}

res_t res_allocate(void)
{
    res_t res;

    cyg_mutex_lock(&amp;res_lock);               // lock the mutex

    while( res_count == 0 )                  // wait for a resource
        cyg_cond_wait(&amp;res_wait);

    res_count--;                             // allocate a resource
    res = res_pool[res_count];

    cyg_mutex_unlock(&amp;res_lock);             // unlock the mutex

    return res;
}

void res_free(res_t res)
{
    cyg_mutex_lock(&amp;res_lock);               // lock the mutex

    res_pool[res_count] = res;               // free the resource
    res_count++;

    cyg_cond_signal(&amp;res_wait);              // wake up any waiting allocators

    cyg_mutex_unlock(&amp;res_lock);             // unlock the mutex
}
      </programlisting>

      <para>
In this version of the code, when <function>res_allocate</function>
detects that there are no resources it calls
<function>cyg_cond_wait</function>. This does two things: it unlocks
the mutex, and puts the calling thread to sleep on the condition
variable. When <function>res_free</function> is eventually called, it
puts a resource back into the pool and calls
<function>cyg_cond_signal</function> to wake up any thread waiting on
the condition variable. When the waiting thread eventually gets to run again,
it will re-lock the mutex before returning from
<function>cyg_cond_wait</function>.
      </para>

      <para>
There are two important things to note about the way in which this
code works. The first is that the mutex unlock and wait in
<function>cyg_cond_wait</function> are atomic: no other thread can run
between the unlock and the wait. If this were not the case then a call
to <function>res_free</function> by that thread would release the
resource but the call to <function>cyg_cond_signal</function> would be
lost, and the first thread would end up waiting when there were
resources available.
      </para>

      <para>
The second feature is that the call to
<function>cyg_cond_wait</function> is in a <literal>while</literal>
loop and not a simple <literal>if</literal> statement. This is because
of the need to re-lock the mutex in <function>cyg_cond_wait</function>
when the signalled thread reawakens. If there are other threads
already queued to claim the lock then this thread must wait. Depending
on the scheduler and the queue order, many other threads may have
entered the critical section before this one gets to run. So the
condition that it was waiting for may have been rendered false. Using
a loop around all condition variable wait operations is the only way
to guarantee that the condition being waited for is still true after
waiting.
      </para>

      <para>
Before a condition variable can be used it must be initialized with a
call to <function>cyg_cond_init</function>. This requires two
arguments, memory for the data structure and a pointer to an existing
mutex. This mutex will not be initialized by
<function>cyg_cond_init</function>, instead a separate call to
<function>cyg_mutex_init</function> is required. If a condition
variable is no longer required and there are no threads waiting on it
then <function>cyg_cond_destroy</function> can be used.
      </para>
      <para>
When a thread needs to wait for a condition to be satisfied it can
call <function>cyg_cond_wait</function>. The thread must have already
locked the mutex that was specified in the
<function>cyg_cond_init</function> call. This mutex will be unlocked
and the current thread will be suspended in an atomic operation. When
some other thread performs a signal or broadcast operation the current
thread will be woken up and automatically reclaim ownership of the mutex
again, allowing it to examine global state and determine whether or
not the condition is now satisfied.
      </para>
      <para>
The kernel supplies a variant of this function,
<function>cyg_cond_timed_wait</function>, which can be used to wait on
the condition variable or until some number of clock ticks have
occurred. The number of ticks is specified as an absolute, not
relative tick count, and so in order to wait for a relative number of
ticks, the return value of the <function>cyg_current_time()</function>
function should be added to determine the absolute number of ticks.
The mutex will always be reclaimed before
<function>cyg_cond_timed_wait</function> returns, regardless of
whether it was a result of a signal operation or a timeout.
      </para>
      <para>
There is no <function>cyg_cond_trywait</function> function because
this would not serve any purpose. If a thread has locked the mutex and
determined that the condition is satisfied, it can just release the
mutex and return. There is no need to perform any operation on the
condition variable.
      </para>
      <para>
When a thread changes shared state that may affect some other thread
blocked on a condition variable, it should call either
<function>cyg_cond_signal</function> or
<function>cyg_cond_broadcast</function>. These calls do not require
ownership of the mutex, but usually the mutex will have been claimed
before updating the shared state. A signal operation only wakes up the
first thread that is waiting on the condition variable, while a
broadcast wakes up all the threads. If there are no threads waiting on
the condition variable at the time, then the signal or broadcast will
have no effect: past signals are not counted up or remembered in any
way. Typically a signal should be used when all threads will check the
same condition and at most one thread can continue running. A
broadcast should be used if threads check slightly different
conditions, or if the change to the global state might allow multiple
threads to proceed.
      </para>
    </refsect1>

    <refsect1 id="kernel-condition-variables-context"><title>Valid contexts</title>
      <para>
<function>cyg_cond_init</function> is typically called during system
initialization but may also be called in thread context. The same
applies to <function>cyg_cond_destroy</function>.
<function>cyg_cond_wait</function> and
<function>cyg_cond_timed_wait</function> may only be called from thread
context since they may block. <function>cyg_cond_signal</function> and
<function>cyg_cond_broadcast</function> may be called from thread or
DSR context.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Semaphores                     -->

  <refentry id="kernel-semaphores">

    <refmeta>
    <refentrytitle>Semaphores</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_semaphore_init</refname>
      <refname>cyg_semaphore_destroy</refname>
      <refname>cyg_semaphore_wait</refname>
      <refname>cyg_semaphore_timed_wait</refname>
      <refname>cyg_semaphore_trywait</refname>
      <refname>cyg_semaphore_post</refname>
      <refname>cyg_semaphore_peek</refname>
      <refpurpose>Synchronization primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_semaphore_init</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
          <paramdef>cyg_count32 <parameter>val</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_semaphore_destroy</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_semaphore_wait</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_semaphore_timed_wait</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>abstime</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_semaphore_trywait</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_semaphore_post</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_semaphore_peek</function></funcdef>
          <paramdef>cyg_sem_t* <parameter>sem</parameter></paramdef>
          <paramdef>cyg_count32* <parameter>val</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-semaphores-description"><title>Description</title>
      <para>
Counting semaphores are a <link
linkend="kernel-overview-synch-primitives">synchronization
primitive</link> that allow threads to wait until an event has
occurred. The event may be generated by a producer thread, or by a DSR
in response to a hardware interrupt. Associated with each semaphore is
an integer counter that keeps track of the number of events that have
not yet been processed. If this counter is zero, an attempt by a
consumer thread to wait on the semaphore will block until some other
thread or a DSR posts a new event to the semaphore. If the counter is
greater than zero then an attempt to wait on the semaphore will
consume one event, in other words decrement the counter, and return
immediately. Posting to a semaphore will wake up the first thread that
is currently waiting, which will then resume inside the semaphore wait
operation and decrement the counter again.
      </para>
      <para>
Another use of semaphores is for certain forms of resource management.
The counter would correspond to how many of a certain type of resource
are currently available, with threads waiting on the semaphore to
claim a resource and posting to release the resource again. In
practice <link linkend="kernel-condition-variables">condition
variables</link> are usually much better suited for operations like
this.
      </para>
      <para>
<function>cyg_semaphore_init</function> is used to initialize a
semaphore. It takes two arguments, a pointer to a
<structname>cyg_sem_t</structname> structure and an initial value for
the counter. Note that semaphore operations, unlike some other parts
of the kernel API, use pointers to data structures rather than
handles. This makes it easier to embed semaphores in a larger data
structure. The initial counter value can be any number, zero, positive
or negative, but typically a value of zero is used to indicate that no
events have occurred yet.
      </para>
      <para>
<function>cyg_semaphore_wait</function> is used by a consumer thread
to wait for an event. If the current counter is greater than 0, in
other words if the event has already occurred in the past, then the
counter will be decremented and the call will return immediately.
Otherwise the current thread will be blocked until there is a
<function>cyg_semaphore_post</function> call.
      </para>
      <para>
<function>cyg_semaphore_post</function> is called when an event has
occurred. This increments the counter and wakes up the first thread
waiting on the semaphore (if any). Usually that thread will then
continue running inside <function>cyg_semaphore_wait</function> and
decrement the counter again. However other scenarios are possible.
For example the thread calling <function>cyg_semaphore_post</function>
may be running at high priority, some other thread running at medium
priority may be about to call <function>cyg_semaphore_wait</function>
when it next gets a chance to run, and a low priority thread may be
waiting on the semaphore. What will happen is that the current high
priority thread continues running until it is descheduled for some
reason, then the medium priority thread runs and its call to
<function>cyg_semaphore_wait</function> succeeds immediately, and
later on the low priority thread runs again, discovers a counter value
of 0, and blocks until another event is posted. If there are multiple
threads blocked on a semaphore then the configuration option
<varname>CYGIMP_KERNEL_SCHED_SORTED_QUEUES</varname> determines which
one will be woken up by a post operation.
      </para>
      <para>
<function>cyg_semaphore_wait</function> returns a boolean. Normally it
will block until it has successfully decremented the counter, retrying
as necessary, and return success. However the wait operation may be
aborted by a call to <link
linkend="kernel-thread-control"><function>cyg_thread_release</function></link>,
and <function>cyg_semaphore_wait</function> will then return false.
      </para>
      <para>
<function>cyg_semaphore_timed_wait</function> is a variant of
<function>cyg_semaphore_wait</function>. It can be used to wait until
either an event has occurred or a number of clock ticks have happened.
The number of ticks is specified as an absolute, not relative tick
count, and so in order to wait for a relative number of ticks, the
return value of the <function>cyg_current_time()</function> function
should be added to determine the absolute number of ticks. The
function returns success if the semaphore wait operation succeeded, or
false if the operation timed out or was aborted by
<function>cyg_thread_release</function>.
If support for the real-time
clock has been removed from the current configuration then this
function will not be available.
<function>cyg_semaphore_trywait</function> is another variant which
will always return immediately rather than block, again returning
success or failure. If <function>cyg_semaphore_timed_wait</function>
is given a timeout in the past, it operates like
<function>cyg_semaphore_trywait</function>.
      </para>
      <para>
<function>cyg_semaphore_peek</function> can be used to get hold of the
current counter value. This function is rarely useful except for
debugging purposes since the counter value may change at any time if
some other thread or a DSR performs a semaphore operation.
      </para>
    </refsect1>

    <refsect1 id="kernel-semaphores-context"><title>Valid contexts</title>
      <para>
<function>cyg_semaphore_init</function> is normally called during
initialization but may also be called from thread context.
<function>cyg_semaphore_wait</function> and
<function>cyg_semaphore_timed_wait</function> may only be called from
thread context because these operations may block.
<function>cyg_semaphore_trywait</function>,
<function>cyg_semaphore_post</function> and
<function>cyg_semaphore_peek</function> may be called from thread or
DSR context.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Mail boxes                     -->

  <refentry id="kernel-mail-boxes">

    <refmeta>
    <refentrytitle>Mail boxes</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_mbox_create</refname>
      <refname>cyg_mbox_delete</refname>
      <refname>cyg_mbox_get</refname>
      <refname>cyg_mbox_timed_get</refname>
      <refname>cyg_mbox_tryget</refname>
      <refname>cyg_mbox_peek_item</refname>
      <refname>cyg_mbox_put</refname>
      <refname>cyg_mbox_timed_put</refname>
      <refname>cyg_mbox_tryput</refname>
      <refname>cyg_mbox_peek</refname>
      <refname>cyg_mbox_waiting_to_get</refname>
      <refname>cyg_mbox_waiting_to_put</refname>
      <refpurpose>Synchronization primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_mbox_create</function></funcdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_mbox* <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_mbox_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void* <function>cyg_mbox_get</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void* <function>cyg_mbox_timed_get</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>abstime</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void* <function>cyg_mbox_tryget</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_count32 <function>cyg_mbox_peek</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void* <function>cyg_mbox_peek_item</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mbox_put</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
          <paramdef>void* <parameter>item</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mbox_timed_put</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
          <paramdef>void* <parameter>item</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>abstime</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mbox_tryput</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
          <paramdef>void* <parameter>item</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mbox_waiting_to_get</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_mbox_waiting_to_put</function></funcdef>
          <paramdef>cyg_handle_t <parameter>mbox</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-mail-boxes-description"><title>Description</title>
      <para>
Mail boxes are a synchronization primitive. Like semaphores they
can be used by a consumer thread to wait until a certain event has
occurred, but the producer also has the ability to transmit some data
along with each event. This data, the message, is normally a pointer
to some data structure. It is stored in the mail box itself, so the
producer thread that generates the event and provides the data usually
does not have to block until some consumer thread is ready to receive
the event. However a mail box will only have a finite capacity,
typically ten slots. Even if the system is balanced and events are
typically consumed at least as fast as they are generated, a burst of
events can cause the mail box to fill up and the generating thread
will block until space is available again. This behaviour is very
different from semaphores, where it is only necessary to maintain a
counter and hence an overflow is unlikely.
      </para>
      <para>
Before a mail box can be used it must be created with a call to
<function>cyg_mbox_create</function>. Each mail box has a unique
handle which will be returned via the first argument and which should
be used for subsequent operations.
<function>cyg_mbox_create</function> also requires an area of memory
for the kernel structure, which is provided by the
<structname>cyg_mbox</structname> second argument. If a mail box is
no longer required then <function>cyg_mbox_delete</function> can be
used. This will simply discard any messages that remain posted.
      </para>
      <para>
The main function for waiting on a mail box is
<function>cyg_mbox_get</function>. If there is a pending message
because of a call to <function>cyg_mbox_put</function> then
<function>cyg_mbox_get</function> will return immediately with the
message that was put into the mail box. Otherwise this function
will block until there is a put operation. Exceptionally the thread
can instead be unblocked by a call to
<function>cyg_thread_release</function>, in which case
<function>cyg_mbox_get</function> will return a null pointer. It is
assumed that there will never be a call to
<function>cyg_mbox_put</function> with a null pointer, because it
would not be possible to distinguish between that and a release
operation. Messages are always retrieved in the order in which they
were put into the mail box, and there is no support for messages
with different priorities.
      </para>
      <para>
There are two variants of <function>cyg_mbox_get</function>. The
first, <function>cyg_mbox_timed_get</function> will wait until either
a message is available or until a number of clock ticks have occurred.
The number of ticks is specified as an absolute, not relative tick
count, and so in order to wait for a relative number of ticks, the
return value of the <function>cyg_current_time()</function> function
should be added to determine the absolute number of ticks.  If no
message is posted within the timeout then a null pointer will be
returned. <function>cyg_mbox_tryget</function> is a non-blocking
operation which will either return a message if one is available or a
null pointer.
      </para>
      <para>
New messages are placed in the mail box by calling
<function>cyg_mbox_put</function> or one of its variants. The main put
function takes two arguments, a handle to the mail box and a
pointer for the message itself. If there is a spare slot in the
mail box then the new message can be placed there immediately, and
if there is a waiting thread it will be woken up so that it can
receive the message. If the mail box is currently full then
<function>cyg_mbox_put</function> will block until there has been a
get operation and a slot is available. The
<function>cyg_mbox_timed_put</function> variant imposes a time limit
on the put operation, returning false if the operation cannot be
completed within the specified number of clock ticks and as for
<function>cyg_mbox_timed_get</function> this is an absolute tick
count. The <function>cyg_mbox_tryput</function> variant is
non-blocking, returning false if there are no free slots available and
the message cannot be posted without blocking.
      </para>
      <para>
There are a further four functions available for examining the current
state of a mailbox. The results of these functions must be used with
care because usually the state can change at any time as a result of
activity within other threads, but they may prove occasionally useful
during debugging or in special situations.
<function>cyg_mbox_peek</function> returns a count of the number of
messages currently stored in the mail box.
<function>cyg_mbox_peek_item</function> retrieves the first message,
but it remains in the mail box until a get operation is performed.
<function>cyg_mbox_waiting_to_get</function> and
<function>cyg_mbox_waiting_to_put</function> indicate whether or not
there are currently threads blocked in a get or a put operation on a
given mail box.
      </para>
      <para>
The number of slots in each mail box is controlled by a
configuration option
<varname>CYGNUM_KERNEL_SYNCH_MBOX_QUEUE_SIZE</varname>, with a default
value of 10. All mail boxes are the same size.
      </para>
    </refsect1>

    <refsect1 id="kernel-mail-boxes-context"><title>Valid contexts</title>
      <para>
<function>cyg_mbox_create</function> is typically called during
system initialization but may also be called in thread context.
The remaining functions are normally called only during thread
context. Of special note is <function>cyg_mbox_put</function> which
can be a blocking operation when the mail box is full, and which
therefore must never be called from DSR context. It is permitted to
call <function>cyg_mbox_tryput</function>,
<function>cyg_mbox_tryget</function>, and the information functions
from DSR context but this is rarely useful.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Flags                          -->

  <refentry id="kernel-flags">

    <refmeta>
    <refentrytitle>Event Flags</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_flag_init</refname>
      <refname>cyg_flag_destroy</refname>
      <refname>cyg_flag_setbits</refname>
      <refname>cyg_flag_maskbits</refname>
      <refname>cyg_flag_wait</refname>
      <refname>cyg_flag_timed_wait</refname>
      <refname>cyg_flag_poll</refname>
      <refname>cyg_flag_peek</refname>
      <refname>cyg_flag_waiting</refname>
      <refpurpose>Synchronization primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_flag_init</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_flag_destroy</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_flag_setbits</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
          <paramdef>cyg_flag_value_t <parameter>value</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_flag_maskbits</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
          <paramdef>cyg_flag_value_t <parameter>value</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_flag_value_t <function>cyg_flag_wait</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
          <paramdef>cyg_flag_value_t <parameter>pattern</parameter></paramdef>
          <paramdef>cyg_flag_mode_t <parameter>mode</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_flag_value_t <function>cyg_flag_timed_wait</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
          <paramdef>cyg_flag_value_t <parameter>pattern</parameter></paramdef>
          <paramdef>cyg_flag_mode_t <parameter>mode</parameter></paramdef>
          <paramdef>cyg_tick_count_t <parameter>abstime</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_flag_value_t <function>cyg_flag_poll</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
          <paramdef>cyg_flag_value_t <parameter>pattern</parameter></paramdef>
          <paramdef>cyg_flag_mode_t <parameter>mode</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_flag_value_t <function>cyg_flag_peek</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_flag_waiting</function></funcdef>
          <paramdef>cyg_flag_t* <parameter>flag</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-flags-description"><title>Description</title>
      <para>
Event flags allow a consumer thread to wait for one of several
different types of event to occur. Alternatively it is possible to
wait for some combination of events. The implementation is relatively
straightforward. Each event flag contains a 32-bit integer.
Application code associates these bits with specific events, so for
example bit 0 could indicate that an I/O operation has completed and
data is available, while bit 1 could indicate that the user has
pressed a start button. A producer thread or a DSR can cause one or
more of the bits to be set, and a consumer thread currently waiting
for these bits will be woken up.
      </para>
      <para>
Unlike semaphores no attempt is made to keep track of event counts. It
does not matter whether a given event occurs once or multiple times
before being consumed, the corresponding bit in the event flag will
change only once. However semaphores cannot easily be used to handle
multiple event sources. Event flags can often be used as an
alternative to condition variables, although they cannot be used for
completely arbitrary conditions and they only support the equivalent
of condition variable broadcasts, not signals.
      </para>
      <para>
Before an event flag can be used it must be initialized by a call to
<function>cyg_flag_init</function>. This takes a pointer to a
<structname>cyg_flag_t</structname> data structure, which can be part of a
larger structure. All 32 bits in the event flag will be set to 0,
indicating that no events have yet occurred. If an event flag is no
longer required it can be cleaned up with a call to
<function>cyg_flag_destroy</function>, allowing the memory for the
<structname>cyg_flag_t</structname> structure to be re-used.
      </para>
      <para>
A consumer thread can wait for one or more events by calling
<function>cyg_flag_wait</function>. This takes three arguments. The
first identifies a particular event flag. The second is some
combination of bits, indicating which events are of interest. The
final argument should be one of the following:
      </para>
      <variablelist>
        <varlistentry>
          <term><literal>CYG_FLAG_WAITMODE_AND</literal></term>
          <listitem><para>
The call to <function>cyg_flag_wait</function> will block until all
the specified event bits are set. The event flag is not cleared when
the wait succeeds, in other words all the bits remain set.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term><literal>CYG_FLAG_WAITMODE_OR</literal></term>
          <listitem><para>
The call will block until at least one of the specified event bits is
set. The event flag is not cleared on return.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term><literal>CYG_FLAG_WAITMODE_AND | CYG_FLAG_WAITMODE_CLR</literal></term>
          <listitem><para>
The call will block until all the specified event bits are set, and
the entire event flag is cleared when the call succeeds. Note that
if this mode of operation is used then a single event flag cannot be
used to store disjoint sets of events, even though enough bits might
be available. Instead each disjoint set of events requires its own
event flag.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term><literal>CYG_FLAG_WAITMODE_OR | CYG_FLAG_WAITMODE_CLR</literal></term>
          <listitem><para>
The call will block until at least one of the specified event bits is
set, and the entire flag is cleared when the call succeeds.
          </para></listitem>
        </varlistentry>
      </variablelist>
      <para>
A call to <function>cyg_flag_wait</function> normally blocks until the
required condition is satisfied. It will return the value of the event
flag at the point that the operation succeeded, which may be a
superset of the requested events. If
<function>cyg_thread_release</function> is used to unblock a thread
that is currently in a wait operation, the
<function>cyg_flag_wait</function> call will instead return 0.
      </para>
      <para>
<function>cyg_flag_timed_wait</function> is a variant of
<function>cyg_flag_wait</function> which adds a timeout: the wait
operation must succeed within the specified number of ticks, or it
will fail with a return value of 0. The number of ticks is specified
as an absolute, not relative tick count, and so in order to wait for a
relative number of ticks, the return value of the
<function>cyg_current_time()</function> function should be added to
determine the absolute number of ticks.
<function>cyg_flag_poll</function> is a non-blocking variant: if the
wait operation can succeed immediately it acts like
<function>cyg_flag_wait</function>, otherwise it returns immediately
with a value of 0.
      </para>
      <para>
<function>cyg_flag_setbits</function> is called by a producer thread
or from inside a DSR when an event occurs. The specified bits are or'd
into the current event flag value. This may cause one or more waiting 
threads to be woken up, if their conditions are now satisfied. How many 
threads are awoken depends on the use of <literal>CYG_FLAG_WAITMODE_CLR
</literal>. The queue of threads waiting on the flag is walked to find 
threads which now have their wake condition fulfilled. If the awoken thread 
has passed <literal>CYG_FLAG_WAITMODE_CLR</literal> the walking of the queue 
is terminated, otherwise the walk continues. Thus if no threads have passed 
<literal>CYG_FLAG_WAITMODE_CLR</literal> all threads with fulfilled 
conditions will be awoken. If <literal>CYG_FLAG_WAITMODE_CLR</literal> is 
passed by threads with fulfilled conditions, the number of awoken threads 
will depend on the order the threads are in the queue.
      </para>
      <para>
<function>cyg_flag_maskbits</function> can be used to clear one or
more bits in the event flag. This can be called from a producer when a
particular condition is no longer satisfied, for example when the user
is no longer pressing a particular button. It can also be used by a
consumer thread if <literal>CYG_FLAG_WAITMODE_CLR</literal> was not
used as part of the wait operation, to indicate that some but not all
of the active events have been consumed. If there are multiple
consumer threads performing wait operations without using
<literal>CYG_FLAG_WAITMODE_CLR</literal> then typically some
additional synchronization such as a mutex is needed to prevent
multiple threads consuming the same event.
      </para>
      <para>
Two additional functions are provided to query the current state of an
event flag. <function>cyg_flag_peek</function> returns the current
value of the event flag, and <function>cyg_flag_waiting</function> can
be used to find out whether or not there are any threads currently
blocked on the event flag. Both of these functions must be used with
care because other threads may be operating on the event flag.
      </para>
    </refsect1>

    <refsect1 id="kernel-flags-context"><title>Valid contexts</title>
      <para>
<function>cyg_flag_init</function> is typically called during system
initialization but may also be called in thread context. The same
applies to <function>cyg_flag_destroy</function>.
<function>cyg_flag_wait</function> and
<function>cyg_flag_timed_wait</function> may only be called from
thread context. The remaining functions may be called from thread or
DSR context.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Spinlocks                      -->

  <refentry id="kernel-spinlocks">

    <refmeta>
    <refentrytitle>Spinlocks</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_spinlock_init</refname>
      <refname>cyg_spinlock_destroy</refname>
      <refname>cyg_spinlock_spin</refname>
      <refname>cyg_spinlock_clear</refname>
      <refname>cyg_spinlock_try</refname>
      <refname>cyg_spinlock_test</refname>
      <refname>cyg_spinlock_spin_intsave</refname>
      <refname>cyg_spinlock_clear_intsave</refname>
      <refpurpose>Low-level Synchronization Primitive</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_init</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
          <paramdef>cyg_bool_t <parameter>locked</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_destroy</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_spin</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_clear</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_spinlock_try</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_bool_t <function>cyg_spinlock_test</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_spin_intsave</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
          <paramdef>cyg_addrword_t* <parameter>istate</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_spinlock_clear_intsave</function></funcdef>
          <paramdef>cyg_spinlock_t* <parameter>lock</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>istate</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-spinlocks-description"><title>Description</title>
      <para>
Spinlocks provide an additional synchronization primitive for
applications running on SMP systems. They operate at a lower level
than the other primitives such as mutexes, and for most purposes the
higher-level primitives should be preferred. However there are some
circumstances where a spinlock is appropriate, especially when
interrupt handlers and threads need to share access to hardware, and
on SMP systems the kernel implementation itself depends on spinlocks.
      </para>
      <para>
Essentially a spinlock is just a simple flag. When code tries to claim
a spinlock it checks whether or not the flag is already set. If not
then the flag is set and the operation succeeds immediately. The exact
implementation of this is hardware-specific, for example it may use a
test-and-set instruction to guarantee the desired behaviour even if
several processors try to access the spinlock at the exact same time.
If it is not possible to claim a spinlock then the current thread spins
in a tight loop, repeatedly checking the flag until it is clear. This
behaviour is very different from other synchronization primitives such
as mutexes, where contention would cause a thread to be suspended. The
assumption is that a spinlock will only be held for a very short time.
If claiming a spinlock could cause the current thread to be suspended
then spinlocks could not be used inside interrupt handlers, which is
not acceptable.
      </para>
      <para>
This does impose a constraint on any code which uses spinlocks.
Specifically it is important that spinlocks are held only for a short
period of time, typically just some dozens of instructions. Otherwise
another processor could be blocked on the spinlock for a long time,
unable to do any useful work. It is also important that a thread which
owns a spinlock does not get preempted because that might cause
another processor to spin for a whole timeslice period, or longer. One
way of achieving this is to disable interrupts on the current
processor, and the function
<function>cyg_spinlock_spin_intsave</function> is provided to
facilitate this.
      </para>
      <para>
Spinlocks should not be used on single-processor systems. Consider a
high priority thread which attempts to claim a spinlock already held
by a lower priority thread: it will just loop forever and the lower
priority thread will never get another chance to run and release the
spinlock. Even if the two threads were running at the same priority,
the one attempting to claim the spinlock would spin until it was
timesliced and a lot of cpu time would be wasted. If an interrupt
handler tried to claim a spinlock owned by a thread, the interrupt
handler would loop forever. Therefore spinlocks are only appropriate
for SMP systems where the current owner of a spinlock can continue
running on a different processor.
      </para>
      <para>
Before a spinlock can be used it must be initialized by a call to
<function>cyg_spinlock_init</function>. This takes two arguments, a
pointer to a <structname>cyg_spinlock_t</structname> data structure, and
a flag to specify whether the spinlock starts off locked or unlocked.
If a spinlock is no longer required then it can be destroyed by a call
to <function>cyg_spinlock_destroy</function>.
      </para>
      <para>
There are two routines for claiming a spinlock:
<function>cyg_spinlock_spin</function> and
<function>cyg_spinlock_spin_intsave</function>. The former can be used
when it is known the current code will not be preempted, for example
because it is running in an interrupt handler or because interrupts
are disabled. The latter will disable interrupts in addition to
claiming the spinlock, so is safe to use in all circumstances. The
previous interrupt state is returned via the second argument, and
should be used in a subsequent call to
<function>cyg_spinlock_clear_intsave</function>. 
      </para>
      <para>
Similarly there are two routines for releasing a spinlock:
<function>cyg_spinlock_clear</function> and
<function>cyg_spinlock_clear_intsave</function>. Typically
the former will be used if the spinlock was claimed by a call to
<function>cyg_spinlock_spin</function>, and the latter when
<function>cyg_spinlock_spin_intsave</function> was used.
      </para>
      <para>
There are two additional routines.
<function>cyg_spinlock_try</function> is a non-blocking version of
<function>cyg_spinlock_spin</function>: if possible the lock will be
claimed and the function will return <literal>true</literal>; otherwise the function
will return immediately with failure.
<function>cyg_spinlock_test</function> can be used to find out whether
or not the spinlock is currently locked. This function must be used
with care because, especially on a multiprocessor system, the state of
the spinlock can change at any time.
      </para>
      <para>
Spinlocks should only be held for a short period of time, and
attempting to claim a spinlock will never cause a thread to be
suspended. This means that there is no need to worry about priority
inversion problems, and concepts such as priority ceilings and
inheritance do not apply.
      </para>
    </refsect1>

    <refsect1 id="kernel-spinlocks-context"><title>Valid contexts</title>
      <para>
All of the spinlock functions can be called from any context,
including ISR and DSR context. Typically
<function>cyg_spinlock_init</function> is only called during system
initialization. 
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Scheduler control              -->

  <refentry id="kernel-schedcontrol">

    <refmeta>
    <refentrytitle>Scheduler Control</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_scheduler_start</refname>
      <refname>cyg_scheduler_lock</refname>
      <refname>cyg_scheduler_unlock</refname>
      <refname>cyg_scheduler_safe_lock</refname>
      <refname>cyg_scheduler_read_lock</refname>
      <refpurpose>Control the state of the scheduler</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_scheduler_start</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_scheduler_lock</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_scheduler_unlock</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_ucount32 <function>cyg_scheduler_read_lock</function></funcdef>
          <void>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-schedcontrol-description"><title>Description</title>
      <para>
<function>cyg_scheduler_start</function> should only be called once,
to mark the end of system initialization. In typical configurations it
is called automatically by the system startup, but some applications
may bypass the standard startup in which case
<function>cyg_scheduler_start</function> will have to be called
explicitly. The call will enable system interrupts, allowing I/O
operations to commence. Then the scheduler will be invoked and control
will be transferred to the highest priority runnable thread. The call
will never return.
      </para>
      <para>
The various data structures inside the eCos kernel must be protected
against concurrent updates. Consider a call to
<function>cyg_semaphore_post</function> which causes a thread to be
woken up: the semaphore data structure must be updated to remove the
thread from its queue; the scheduler data structure must also be
updated to mark the thread as runnable; it is possible that the newly
runnable thread has a higher priority than the current one, in which
case preemption is required. If in the middle of the semaphore post
call an interrupt occurred and the interrupt handler tried to
manipulate the same data structures, for example by making another
thread runnable, then it is likely that the structures will be left in
an inconsistent state and the system will fail.
      </para>
      <para>
To prevent such problems the kernel contains a special lock known as
the scheduler lock. A typical kernel function such as
<function>cyg_semaphore_post</function> will claim the scheduler lock,
do all its manipulation of kernel data structures, and then release
the scheduler lock. The current thread cannot be preempted while it
holds the scheduler lock. If an interrupt occurs and a DSR is supposed
to run to signal that some event has occurred, that DSR is postponed
until the scheduler unlock operation. This prevents concurrent updates
of kernel data structures.
      </para>
      <para>
The kernel exports three routines for manipulating the scheduler lock.
<function>cyg_scheduler_lock</function> can be called to claim the
lock. On return it is guaranteed that the current thread will not be
preempted, and that no other code is manipulating any kernel data
structures. <function>cyg_scheduler_unlock</function> can be used to
release the lock, which may cause the current thread to be preempted.
<function>cyg_scheduler_read_lock</function> can be used to query the
current state of the scheduler lock. This function should never be
needed because well-written code should always know whether or not the
scheduler is currently locked, but may prove useful during debugging.
      </para>
      <para>
The implementation of the scheduler lock involves a simple counter.
Code can call <function>cyg_scheduler_lock</function> multiple times,
causing the counter to be incremented each time, as long as
<function>cyg_scheduler_unlock</function> is called the same number of
times. This behaviour is different from mutexes where an attempt by a
thread to lock a mutex multiple times will result in deadlock or an
assertion failure.
      </para>
      <para>
Typical application code should not use the scheduler lock. Instead
other synchronization primitives such as mutexes and semaphores should
be used. While the scheduler is locked the current thread cannot be
preempted, so any higher priority threads will not be able to run.
Also no DSRs can run, so device drivers may not be able to service
I/O requests. However there is one situation where locking the
scheduler is appropriate: if some data structure needs to be shared
between an application thread and a DSR associated with some interrupt
source, the thread can use the scheduler lock to prevent concurrent
invocations of the DSR and then safely manipulate the structure. It is
desirable that the scheduler lock is held for only a short period of
time, typically some tens of instructions. In exceptional cases there
may also be some performance-critical code where it is more
appropriate to use the scheduler lock rather than a mutex, because the
former is more efficient.
      </para>
    </refsect1>

    <refsect1 id="kernel-schedcontrol-context"><title>Valid contexts</title>
      <para>
<function>cyg_scheduler_start</function> can only be called during
system initialization, since it marks the end of that phase. The
remaining functions may be called from thread or DSR context. Locking
the scheduler from inside the DSR has no practical effect because the
lock is claimed automatically by the interrupt subsystem before
running DSRs, but allows functions to be shared between normal thread
code and DSRs.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ Interrupt handling             -->

  <refentry id="kernel-interrupts">

    <refmeta>
    <refentrytitle>Interrupt Handling</refentrytitle>
    </refmeta>

    <refnamediv>
      <refname>cyg_interrupt_create</refname>
      <refname>cyg_interrupt_delete</refname>
      <refname>cyg_interrupt_attach</refname>
      <refname>cyg_interrupt_detach</refname>
      <refname>cyg_interrupt_configure</refname>
      <refname>cyg_interrupt_acknowledge</refname>
      <refname>cyg_interrupt_enable</refname>
      <refname>cyg_interrupt_disable</refname>
      <refname>cyg_interrupt_mask</refname>
      <refname>cyg_interrupt_mask_intunsafe</refname>
      <refname>cyg_interrupt_unmask</refname>
      <refname>cyg_interrupt_unmask_intunsafe</refname>
      <refname>cyg_interrupt_set_cpu</refname>
      <refname>cyg_interrupt_get_cpu</refname>
      <refname>cyg_interrupt_get_vsr</refname>
      <refname>cyg_interrupt_set_vsr</refname>
      <refpurpose>Manage interrupt handlers</refpurpose>
    </refnamediv>

    <refsynopsisdiv>
      <funcsynopsis>
        <funcsynopsisinfo>
#include &lt;cyg/kernel/kapi.h&gt;
        </funcsynopsisinfo>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_create</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
          <paramdef>cyg_priority_t <parameter>priority</parameter></paramdef>
          <paramdef>cyg_addrword_t <parameter>data</parameter></paramdef>
          <paramdef>cyg_ISR_t* <parameter>isr</parameter></paramdef>
          <paramdef>cyg_DSR_t* <parameter>dsr</parameter></paramdef>
          <paramdef>cyg_handle_t* <parameter>handle</parameter></paramdef>
          <paramdef>cyg_interrupt* <parameter>intr</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_delete</function></funcdef>
          <paramdef>cyg_handle_t <parameter>interrupt</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_attach</function></funcdef>
          <paramdef>cyg_handle_t <parameter>interrupt</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_detach</function></funcdef>
          <paramdef>cyg_handle_t <parameter>interrupt</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_configure</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
          <paramdef>cyg_bool_t <parameter>level</parameter></paramdef>
          <paramdef>cyg_bool_t <parameter>up</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_acknowledge</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_disable</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_enable</function></funcdef>
          <void>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_mask</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_mask_intunsafe</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_unmask</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_unmask_intunsafe</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_set_cpu</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
          <paramdef>cyg_cpu_t <parameter>cpu</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>cyg_cpu_t <function>cyg_interrupt_get_cpu</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_get_vsr</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
          <paramdef>cyg_VSR_t** <parameter>vsr</parameter></paramdef>
        </funcprototype>
        <funcprototype>
          <funcdef>void <function>cyg_interrupt_set_vsr</function></funcdef>
          <paramdef>cyg_vector_t <parameter>vector</parameter></paramdef>
          <paramdef>cyg_VSR_t* <parameter>vsr</parameter></paramdef>
        </funcprototype>
      </funcsynopsis>
    </refsynopsisdiv>

    <refsect1 id="kernel-interrupts-description"><title>Description</title>
      <para>
The kernel provides an interface for installing interrupt handlers and
controlling when interrupts occur. This functionality is used
primarily by eCos device drivers and by any application code that
interacts directly with hardware. However in most cases it is better
to avoid using this kernel functionality directly, and instead the
device driver API provided by the common HAL package should be used.
Use of the kernel package is optional, and some applications such as
RedBoot work with no need for multiple threads or synchronization
primitives. Any code which calls the kernel directly rather than the
device driver API will not function in such a configuration. When the
kernel package is present the device driver API is implemented as
<literal>#define</literal>'s to the equivalent kernel calls, otherwise
it is implemented inside the common HAL package. The latter
implementation can be simpler than the kernel one because there is no
need to consider thread preemption and similar issues.
      </para>
      <para>
The exact details of interrupt handling vary widely between
architectures. The functionality provided by the kernel abstracts away
from many of the details of the underlying hardware, thus simplifying
application development. However this is not always successful. For
example, if some hardware does not provide any support at all for
masking specific interrupts then calling
<function>cyg_interrupt_mask</function> may not behave as intended:
instead of masking just the one interrupt source it might disable all
interrupts, because that is as close to the desired behaviour as is
possible given the hardware restrictions. Another possibility is that
masking a given interrupt source also affects all lower-priority
interrupts, but still allows higher-priority ones. The documentation
for the appropriate HAL packages should be consulted for more
information about exactly how interrupts are handled on any given
hardware. The HAL header files will also contain useful information. 
      </para>
    </refsect1>

    <refsect1 id="kernel-interrupts-handlers"><title>Interrupt Handlers</title>
      <para>
Interrupt handlers are created by a call to
<function>cyg_interrupt_create</function>. This takes the following
arguments: 
      </para>
      <variablelist>
        <varlistentry>
          <term>cyg_vector_t <parameter>vector</parameter></term>
          <listitem><para>
The interrupt vector, a small integer, identifies the specific
interrupt source. The appropriate hardware documentation or HAL header
files should be consulted for details of which vector corresponds to
which device.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_priority_t <parameter>priority</parameter></term>
          <listitem><para>
Some hardware may support interrupt priorities, where a low priority
interrupt handler can in turn be interrupted by a higher priority one.
Again hardware-specific documentation should be consulted for details
about what the valid interrupt priority levels are.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_addrword_t <parameter>data</parameter></term>
          <listitem><para>
When an interrupt occurs eCos will first call the associated
interrupt service routine or ISR, then optionally a deferred service
routine or DSR. The <parameter>data</parameter> argument to
<function>cyg_interrupt_create</function> will be passed to both these
functions. Typically it will be a pointer to some data structure.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_ISR_t <parameter>isr</parameter></term>
          <listitem><para>
When an interrupt occurs the hardware will transfer control to the
appropriate vector service routine or VSR, which is usually provided
by eCos. This performs any appropriate processing, for example to work
out exactly which interrupt occurred, and then as quickly as possible
transfers control to the installed ISR. An ISR is a C function which
takes the following form:
          </para>
          <programlisting width=72>
cyg_uint32
isr_function(cyg_vector_t vector, cyg_addrword_t data)
{
    cyg_bool_t dsr_required = 0;

    &hellip;

    return dsr_required ? CYG_ISR_CALL_DSR : CYG_ISR_HANDLED;
}
          </programlisting>
          <para>
The first argument identifies the particular interrupt source,
especially useful if there are multiple instances of a given device and a
single ISR can be used for several different interrupt vectors. The
second argument is the <parameter>data</parameter> field passed to
<function>cyg_interrupt_create</function>, usually a pointer to some
data structure. The exact conditions under which an ISR runs will
depend partly on the hardware and partly on configuration options.
Interrupts may currently be disabled globally, especially if the
hardware does not support interrupt priorities. Alternatively
interrupts may be enabled such that higher priority interrupts are
allowed through. The ISR may be running on a separate interrupt stack,
or on the stack of whichever thread was running at the time the
interrupt happened.
          </para>
          <para>
A typical ISR will do as little work as possible, just enough to meet
the needs of the hardware and then acknowledge the interrupt by
calling <function>cyg_interrupt_acknowledge</function>. This ensures
that interrupts will be quickly reenabled, so higher priority devices
can be serviced. For some applications there may be one device which
is especially important and whose ISR can take much longer than
normal. However eCos device drivers usually will not assume that they
are especially important, so their ISRs will be as short as possible.
          </para>
          <para>
The return value of an ISR is normally one of
<literal>CYG_ISR_CALL_DSR</literal> or
<literal>CYG_ISR_HANDLED</literal>. The former indicates that further
processing is required at DSR level, and the interrupt handler's DSR
will be run as soon as possible. The latter indicates that the
interrupt has been fully handled and no further effort is required.
          </para>
          <para>
An ISR is allowed to make very few kernel calls. It can manipulate the
interrupt mask, and on SMP systems it can use spinlocks. However an
ISR must not make higher-level kernel calls such as posting to a
semaphore, instead any such calls must be made from the DSR. This
avoids having to disable interrupts throughout the kernel and thus
improves interrupt latency.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_DSR_t <parameter>dsr</parameter></term>
          <listitem><para>
If an interrupt has occurred and the ISR has returned a value
<literal>CYG_ISR_CALL_DSR</literal>, the system will call the
deferred service routine or DSR associated with this interrupt
handler. If the scheduler is not currently locked then the DSR will
run immediately. However if the interrupted thread was in the middle
of a kernel call and had locked the scheduler, then the DSR will be
deferred until the scheduler is again unlocked. This allows the
DSR to make certain kernel calls safely, for example posting to a
semaphore or signalling a condition variable. A DSR is a C function
which takes the following form:
          </para>
          <programlisting width=72>
void
dsr_function(cyg_vector_t vector,
             cyg_ucount32 count,
             cyg_addrword_t data)
{
}
          </programlisting>
          <para>
The first argument identifies the specific interrupt that has caused
the DSR to run. The second argument indicates the number of these
interrupts that have occurred and for which the ISR requested a DSR.
Usually this will be <literal>1</literal>, unless the system is
suffering from a very heavy load. The third argument is the
<parameter>data</parameter> field passed to
<function>cyg_interrupt_create</function>. 
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_handle_t* <parameter>handle</parameter></term>
          <listitem><para>
The kernel will return a handle to the newly created interrupt handler
via this argument. Subsequent operations on the interrupt handler such
as attaching it to the interrupt source will use this handle.
          </para></listitem>
        </varlistentry>
        <varlistentry>
          <term>cyg_interrupt* <parameter>intr</parameter></term>
          <listitem><para>
This provides the kernel with an area of memory for holding this
interrupt handler and associated data.
          </para></listitem>
        </varlistentry>
      </variablelist>
      <para>
The call to <function>cyg_interrupt_create</function> simply fills in
a kernel data structure. A typical next step is to call
<function>cyg_interrupt_attach</function> using the handle returned by
the create operation. This makes it possible to have several different
interrupt handlers for a given vector, attaching whichever one is
currently appropriate. Replacing an interrupt handler requires a call
to <function>cyg_interrupt_detach</function>, followed by another call
to <function>cyg_interrupt_attach</function> for the replacement
handler. <function>cyg_interrupt_delete</function> can be used if an
interrupt handler is no longer required.
      </para>
      <para>
Some hardware may allow for further control over specific interrupts,
for example whether an interrupt is level or edge triggered. Any such
hardware functionality can be accessed using
<function>cyg_interrupt_configure</function>: the
<parameter>level</parameter> argument selects between level versus
edge triggered; the <parameter>up</parameter> argument selects between
high and low level, or between rising and falling edges.
      </para>
      <para>
Usually interrupt handlers are created, attached and configured during
system initialization, while global interrupts are still disabled. On
most hardware it will also be necessary to call
<function>cyg_interrupt_unmask</function>, since the sensible default
for interrupt masking is to ignore any interrupts for which no handler
is installed.
      </para>
    </refsect1>

    <refsect1 id="kernel-interrupts-enable"><title>Controlling Interrupts</title>
      <para>
eCos provides two ways of controlling whether or not interrupts
happen. It is possible to disable and reenable all interrupts
globally, using <function>cyg_interrupt_disable</function> and
<function>cyg_interrupt_enable</function>. Typically this works by
manipulating state inside the cpu itself, for example setting a flag
in a status register or executing special instructions. Alternatively
it may be possible to mask a specific interrupt source by writing to
one or to several interrupt mask registers. Hardware-specific
documentation should be consulted for the exact details of how
interrupt masking works, because a full implementation is not possible
on all hardware.
      </para>
      <para>
The primary use for these functions is to allow data to be shared
between ISRs and other code such as DSRs or threads. If both a thread
and an ISR need to manipulate either a data structure or the hardware
itself, there is a possible conflict if an interrupt happens just when
the thread is doing such manipulation. Problems can be avoided by the
thread either disabling or masking interrupts during the critical
region. If this critical region requires only a few instructions then
usually it is more efficient to disable interrupts. For larger
critical regions it may be more appropriate to use interrupt masking,
allowing other interrupts to occur. There are other uses for interrupt
masking. For example if a device is not currently being used by the
application then it may be desirable to mask all interrupts generated
by that device.
      </para>
      <para>
There are two functions for masking a specific interrupt source,
<function>cyg_interrupt_mask</function> and
<function>cyg_interrupt_mask_intunsafe</function>. On typical hardware
masking an interrupt is not an atomic operation, so if two threads
were to perform interrupt masking operations at the same time there
could be problems. <function>cyg_interrupt_mask</function> disables
all interrupts while it manipulates the interrupt mask. In situations
where interrupts are already known to be disabled,
<function>cyg_interrupt_mask_intunsafe</function> can be used
instead. There are matching functions
<function>cyg_interrupt_unmask</function> and
<function>cyg_interrupt_unmask_intunsafe</function>.
      </para>
    </refsect1>

    <refsect1 id="kernel-interrupts-smp"><title>SMP Support</title>
      <para>
On SMP systems the kernel provides an additional two functions related
to interrupt handling. <function>cyg_interrupt_set_cpu</function>
specifies that a particular hardware interrupt should always be
handled on one specific processor in the system. In other words when
the interrupt triggers it is only that processor which detects it, and
it is only on that processor that the VSR and ISR will run. If a DSR
is requested then it will also run on the same CPU. The
function <function>cyg_interrupt_get_cpu</function> can be used to
find out which interrupts are handled on which processor. 
      </para>
    </refsect1>

    <refsect1 id="kernel-interrupts-vsr"><title>VSR Support</title>
      <para>
When an interrupt occurs the hardware will transfer control to a piece
of code known as the VSR, or Vector Service Routine. By default this
code is provided by eCos. Usually it is written in assembler, but on
some architectures it may be possible to implement VSRs in C by
specifying an interrupt attribute. Compiler documentation should be
consulted for more information on this. The default eCos VSR will work
out which ISR function should process the interrupt, and set up a C
environment suitable for this ISR.
      </para>
      <para>
For some applications it may be desirable to replace the default eCos
VSR and handle some interrupts directly. This minimizes interrupt
latency, but it requires application developers to program at a lower
level. Usually the best way to write a custom VSR is to copy the
existing one supplied by eCos and then make appropriate modifications.
The function <function>cyg_interrupt_get_vsr</function> can be used to
get hold of the current VSR for a given interrupt vector, allowing it
to be restored if the custom VSR is no longer required.
<function>cyg_interrupt_set_vsr</function> can be used to install a
replacement VSR. Usually the <parameter>vsr</parameter> argument will
correspond to an exported label in an assembler source file.
      </para>
    </refsect1>

    <refsect1 id="kernel-interrupts-context"><title>Valid contexts</title>
      <para>
In a typical configuration interrupt handlers are created and attached
during system initialization, and never detached or deleted. However
it is possible to perform these operations at thread level, if
desired. Similarly <function>cyg_interrupt_configure</function>,
<function>cyg_interrupt_set_vsr</function>, and
<function>cyg_interrupt_set_cpu</function> are usually called only
during system initialization, but on typical hardware may be called at
any time. <function>cyg_interrupt_get_vsr</function> and
<function>cyg_interrupt_get_cpu</function> may be called at any time.
      </para>
      <para>
The functions for enabling, disabling, masking and unmasking
interrupts can be called in any context, when appropriate. It is the
responsibility of application developers to determine when the use of
these functions is appropriate.
      </para>
    </refsect1>

  </refentry>

<!-- }}} -->
<!-- {{{ tm_basic                       -->

  <refentry id="kernel-characterization">

    <refmeta>
    <refentrytitle>Kernel Real-time Characterization</refentrytitle>
    </refmeta>
    <refnamediv>
      <refname>tm_basic</refname>
      <refpurpose>Measure the performance of the eCos kernel</refpurpose>
    </refnamediv>

    <refsect1 id="kernel-characterization-description">
      <title>Description</title>
        <para>
When building a real-time system, care must be taken to ensure that
the system will be able to perform properly within the constraints of
that system. One of these constraints may be how fast certain
operations can be performed. Another might be how deterministic the
overall behavior of the system is. Lastly the memory footprint (size)
and unit cost may be important.
        </para>
        <para>
One of the major problems encountered while evaluating a system will
be how to compare it with possible alternatives. Most manufacturers of
real-time systems publish performance numbers, ostensibly so that
users can compare the different offerings. However, what these numbers
mean and how they were gathered is often not clear. The values are
typically measured on a particular piece of hardware, so in order to
truly compare, one must obtain measurements for exactly the same set
of hardware that were gathered in a similar fashion.
        </para>
        <para>
Two major items need to be present in any given set of measurements.
First, the raw values for the various operations; these are typically
quite easy to measure and will be available for most systems. Second,
the determinacy of the numbers; in other words how much the value
might change depending on other factors within the system. This value
is affected by a number of factors: how long interrupts might be
masked, whether or not the function can be interrupted, even very
hardware-specific effects such as cache locality and pipeline usage.
It is very difficult to measure the determinacy of any given
operation, but that determinacy is fundamentally important to proper
overall characterization of a system.
        </para>
        <para>
In the discussion and numbers that follow, three key measurements are
provided. The first measurement is an estimate of the interrupt
latency: this is the length of time from when a hardware interrupt
occurs until its Interrupt Service Routine (ISR) is called. The second
measurement is an estimate of overall interrupt overhead: this is the
length of time average interrupt processing takes, as measured by the
real-time clock interrupt (other interrupt sources will certainly take
a different amount of time, but this data cannot be easily gathered).
The third measurement consists of the timings for the various kernel
primitives.
          </para>
        </refsect1>
        <refsect1 id="kernel-characterization-methodology">
          <title>Methodology</title>
          <para>
Key operations in the kernel were measured by using a simple test
program which exercises the various kernel primitive operations. A
hardware timer, normally the one used to drive the real-time clock,
was used for these measurements. In most cases this timer can be read
with quite high resolution, typically in the range of a few
microseconds. For each measurement, the operation was repeated a
number of times. Time stamps were obtained directly before and after
the operation was performed. The data gathered for the entire set of
operations was then analyzed, generating average (mean), maximum and
minimum values. The sample variance (a measure of how close most
samples are to the mean) was also calculated. The cost of obtaining
the real-time clock timer values was also measured, and was subtracted
from all other times.
          </para>
          <para>
Most kernel functions can be measured separately. In each case, a
reasonable number of iterations are performed. Where the test case
involves a kernel object, for example creating a task, each iteration
is performed on a different object. There is also a set of tests which
measures the interactions between multiple tasks and certain kernel
primitives. Most functions are tested in such a way as to determine
the variations introduced by varying numbers of objects in the system.
For example, the mailbox tests measure the cost of a 'peek' operation
when the mailbox is empty, has a single item, and has multiple items
present. In this way, any effects of the state of the object or how
many items it contains can be determined.
          </para>
          <para>
There are a few things to consider about these measurements. Firstly,
they are quite micro in scale and only measure the operation in
question. These measurements do not adequately describe how the
timings would be perturbed in a real system with multiple interrupting
sources. Secondly, the possible aberration incurred by the real-time
clock (system heartbeat tick) is explicitly avoided. Virtually all
kernel functions have been designed to be interruptible. Thus the
times presented are typical, but best case, since any particular
function may be interrupted by the clock tick processing. This number
is explicitly calculated so that the value may be included in any
deadline calculations required by the end user. Lastly, the reported
measurements were obtained from a system built with all options at
their default values. Kernel instrumentation and asserts are also
disabled for these measurements. Any number of configuration options
can change the measured results, sometimes quite dramatically. For
example, mutexes are using priority inheritance in these measurements.
The numbers will change if the system is built with priority
inheritance on mutex variables turned off.
          </para>
          <para>
The final value that is measured is an estimate of interrupt latency.
This particular value is not explicitly calculated in the test program
used, but rather by instrumenting the kernel itself. The raw number of
timer ticks that elapse between the time the timer generates an
interrupt and the start of the timer ISR is kept in the kernel. These
values are printed by the test program after all other operations have
been tested. Thus this should be a reasonable estimate of the
interrupt latency over time.
          </para>
        </refsect1>

        <refsect1 id="kernel-characterization-using-measurements">
          <title>Using these Measurements</title>
          <para>
These measurements can be used in a number of ways. The most typical
use will be to compare different real-time kernel offerings on similar
hardware; another will be to estimate the cost of implementing a task
using eCos (applications can be examined to see what effect the kernel
operations will have on the total execution time). Another use would
be to observe how the tuning of the kernel affects overall operation.
          </para>
        </refsect1>

        <refsect1 id="kernel-characterization-influences">
          <title>Influences on Performance</title>
            <para>
A number of factors can affect real-time performance in a system. One
of the most common factors, yet most difficult to characterize, is the
effect of device drivers and interrupts on system timings. Different
device drivers will have differing requirements as to how long
interrupts are suppressed, for example. The eCos system has been
designed with this in mind, by separating the management of interrupts
(ISR handlers) and the processing required by the interrupt
(DSR&mdash;Deferred Service Routine&mdash;handlers). However, since
there is so much variability here, and indeed most device drivers will
come from the end users themselves, these effects cannot be reliably
measured. Attempts have been made to measure the overhead of the
single interrupt that eCos relies on, the real-time clock timer. This
should give you a reasonable idea of the cost of executing interrupt
handling for devices.
          </para>
        </refsect1>

       <refsect1 id="kernel-characterization-measured-items">
         <title>Measured Items</title>
         <para>
This section describes the various tests and the numbers presented.
All tests use the C kernel API (available by way of
<filename>cyg/kernel/kapi.h</filename>). There is a single main thread
in the system that performs the various tests. Additional threads may
be created as part of the testing, but these are short lived and are
destroyed between tests unless otherwise noted. The terminology
&ldquo;lower priority&rdquo; means a priority that is less important,
not necessarily lower in numerical value. A higher priority thread
will run in preference to a lower priority thread even though the
priority value of the higher priority thread may be numerically less
than that of the lower priority thread.
          </para>

          <refsect2 id="kernel-characterization-measure-threads">
            <title>Thread Primitives</title>
            <variablelist>
              <varlistentry>
                <term>Create thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_create()</function> call.
Each call creates a totally new thread. The set of threads created by
this test will be reused in the subsequent thread primitive tests.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Yield thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_yield()</function> call.
For this test, there are no other runnable threads, thus the test
should just measure the overhead of trying to give up the CPU.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Suspend &lsqb;suspended&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_suspend()</function> call.
A thread may be suspended multiple times; each thread is already
suspended from its initial creation, and is suspended again.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Resume thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_resume()</function> call.
All of the threads have a suspend count of 2, thus this call does not
make them runnable. This test just measures the overhead of resuming a
thread.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Set priority</term>
                <listitem><para>
This test measures the <function>cyg_thread_set_priority()</function>
call. Each thread, currently suspended, has its priority set to a new
value.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Get priority</term>
                <listitem><para>
This test measures the <function>cyg_thread_get_priority()</function>
call.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Kill &lsqb;suspended&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_kill()</function> call.
Each thread in the set is killed. All threads are known to be
suspended before being killed.
		</para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Yield &lsqb;no other&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_yield()</function> call
again. This is to demonstrate that the
<function>cyg_thread_yield()</function> call has a fixed overhead,
regardless of whether there are other threads in the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Resume &lsqb;suspended low priority&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_resume()</function> call
again. In this case, the thread being resumed is lower priority than
the main thread, thus it will simply become ready to run but not be
granted the CPU. This test measures the cost of making a thread ready
to run.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Resume &lsqb;runnable low priority&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_resume()</function> call
again. In this case, the thread being resumed is lower priority than
the main thread and has already been made runnable, so in fact the
resume call has no effect.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Suspend &lsqb;runnable&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_suspend()</function> call
again. In this case, each thread has already been made runnable (by
previous tests).
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Yield &lsqb;only low priority&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_yield()</function> call.
In this case, there are many other runnable threads, but they are all
lower priority than the main thread, thus no thread switches will take
place.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Suspend &lsqb;runnable-&gt;not runnable&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_suspend()</function> call
again. The thread being suspended will become non-runnable by this
action.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Kill &lsqb;runnable&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_kill()</function> call
again. In this case, the thread being killed is currently runnable,
but lower priority than the main thread.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Resume &lsqb;high priority&rsqb; thread</term>
                <listitem><para>
This test measures the <function>cyg_thread_resume()</function> call.
The thread being resumed is higher priority than the main thread, thus
a thread switch will take place on each call. In fact there will be
two thread switches; one to the new higher priority thread and a
second back to the test thread. The test thread exits immediately.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Thread switch</term>
                <listitem><para>
This test attempts to measure the cost of switching from one thread to
another. Two equal priority threads are started and they will each
yield to the other for a number of iterations. A time stamp is
gathered in one thread before the
<function>cyg_thread_yield()</function> call and after the call in the
other thread.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-scheduler">
            <title>Scheduler Primitives</title>
            <variablelist>
              <varlistentry>
                <term>Scheduler lock</term>
                <listitem><para>
This test measures the <function>cyg_scheduler_lock()</function> call.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Scheduler unlock &lsqb;0 threads&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_scheduler_unlock()</function>
call. There are no other threads in the system and the unlock happens
immediately after a lock so there will be no pending DSRs to
run.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Scheduler unlock &lsqb;1 suspended thread&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_scheduler_unlock()</function>
call. There is one other thread in the system which is currently
suspended.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Scheduler unlock &lsqb;many suspended threads&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_scheduler_unlock()</function>
call. There are many other threads in the system which are currently
suspended. The purpose of this test is to determine the cost of having
additional threads in the system when the scheduler is activated by
way of <function>cyg_scheduler_unlock()</function>.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Scheduler unlock &lsqb;many low priority threads&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_scheduler_unlock()</function>
call. There are many other threads in the system which are runnable
but are lower priority than the main thread. The purpose of this test
is to determine the cost of having additional threads in the system
when the scheduler is activated by way of
<function>cyg_scheduler_unlock()</function>.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-mutex">
            <title>Mutex Primitives</title>
            <variablelist>
              <varlistentry>
                <term>Init mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_init()</function> call. A
number of separate mutex variables are created. The purpose of this
test is to measure the cost of creating a new mutex and introducing it
to the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Lock &lsqb;unlocked&rsqb; mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_lock()</function> call. The
purpose of this test is to measure the cost of locking a mutex which
is currently unlocked. There are no other threads executing in the
system while this test runs.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Unlock &lsqb;locked&rsqb; mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_unlock()</function> call.
The purpose of this test is to measure the cost of unlocking a mutex
which is currently locked. There are no other threads executing in the
system while this test runs.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Trylock &lsqb;unlocked&rsqb; mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_trylock()</function> call.
The purpose of this test is to measure the cost of locking a mutex
which is currently unlocked. There are no other threads executing in
the system while this test runs.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Trylock &lsqb;locked&rsqb; mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_trylock()</function> call.
The purpose of this test is to measure the cost of locking a mutex
which is currently locked. There are no other threads executing in the
system while this test runs.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Destroy mutex</term>
                <listitem><para>
This test measures the <function>cyg_mutex_destroy()</function> call.
The purpose of this test is to measure the cost of deleting a mutex
from the system. There are no other threads executing in the system
while this test runs.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Unlock/Lock mutex</term>
                <listitem><para>
This test attempts to measure the cost of unlocking a mutex for which
there is another higher priority thread waiting. When the mutex is
unlocked, the higher priority waiting thread will immediately take the
lock. The time from when the unlock is issued until after the lock
succeeds in the second thread is measured, thus giving the round-trip
or circuit time for this type of synchronizer.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-mailbox">
            <title>Mailbox Primitives</title>
            <variablelist>
              <varlistentry>
                <term>Create mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_create()</function> call. A
number of separate mailboxes are created. The purpose of this test is
to measure the cost of creating a new mailbox and introducing it to
the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek &lsqb;empty&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_peek()</function> call. An
attempt is made to peek the value in each mailbox, which is currently
empty. The purpose of this test is to measure the cost of checking a
mailbox for a value without blocking.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Put &lsqb;first&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_put()</function> call. One
item is added to a currently empty mailbox. The purpose of this test
is to measure the cost of adding an item to a mailbox. There are no
other threads currently waiting for mailbox items to arrive.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek &lsqb;1 msg&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_peek()</function> call. An
attempt is made to peek the value in each mailbox, which contains a
single item. The purpose of this test is to measure the cost of
checking a mailbox which has data to deliver.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Put &lsqb;second&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_put()</function> call. A
second item is added to a mailbox. The purpose of this test is to
measure the cost of adding an additional item to a mailbox. There are
no other threads currently waiting for mailbox items to arrive.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek &lsqb;2 msgs&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_peek()</function> call. An
attempt is made to peek the value in each mailbox, which contains two
items. The purpose of this test is to measure the cost of checking a
mailbox which has data to deliver.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Get &lsqb;first&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_get()</function> call. The
first item is removed from a mailbox that currently contains two
items. The purpose of this test is to measure the cost of obtaining an
item from a mailbox without blocking.
              </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Get &lsqb;second&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_get()</function> call. The
last item is removed from a mailbox that currently contains one item.
The purpose of this test is to measure the cost of obtaining an item
from a mailbox without blocking.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tryput &lsqb;first&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_tryput()</function> call. A
single item is added to a currently empty mailbox. The purpose of this
test is to measure the cost of adding an item to a mailbox.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek item &lsqb;non-empty&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_peek_item()</function> call.
A single item is fetched from a mailbox that contains a single item.
The purpose of this test is to measure the cost of obtaining an item
without disturbing the mailbox.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tryget &lsqb;non-empty&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_tryget()</function> call. A
single item is removed from a mailbox that contains exactly one item.
The purpose of this test is to measure the cost of obtaining one item
from a non-empty mailbox.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek item &lsqb;empty&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_peek_item()</function> call.
An attempt is made to fetch an item from a mailbox that is empty. The
purpose of this test is to measure the cost of trying to obtain an
item when the mailbox is empty.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tryget &lsqb;empty&rsqb; mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_tryget()</function> call. An
attempt is made to fetch an item from a mailbox that is empty. The
purpose of this test is to measure the cost of trying to obtain an
item when the mailbox is empty.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Waiting to get mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_waiting_to_get()</function>
call. The purpose of this test is to measure the cost of determining
how many threads are waiting to obtain a message from this mailbox.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Waiting to put mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_waiting_to_put()</function>
call. The purpose of this test is to measure the cost of determining
how many threads are waiting to put a message into this mailbox.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Delete mbox</term>
                <listitem><para>
This test measures the <function>cyg_mbox_delete()</function> call.
The purpose of this test is to measure the cost of destroying a
mailbox and removing it from the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Put/Get mbox</term>
                <listitem><para>
In this round-trip test, one thread is sending data to a mailbox that
is being consumed by another thread. The time from when the data is
put into the mailbox until it has been delivered to the waiting thread
is measured. Note that this time will contain a thread switch.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-semaphore">
            <title>Semaphore Primitives</title>
            <variablelist>
              <varlistentry>
                <term>Init semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_init()</function> call.
A number of separate semaphore objects are created and introduced to
the system. The purpose of this test is to measure the cost of
creating a new semaphore.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Post &lsqb;0&rsqb; semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_post()</function> call.
Each semaphore currently has a value of 0 and there are no other
threads in the system. The purpose of this test is to measure the
overhead cost of posting to a semaphore. This cost will differ if
there is a thread waiting for the semaphore.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Wait &lsqb;1&rsqb; semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_wait()</function> call.
The semaphore has a current value of 1 so the call is non-blocking.
The purpose of the test is to measure the overhead of
&ldquo;taking&rdquo; a semaphore.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Trywait &lsqb;0&rsqb; semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_trywait()</function>
call. The semaphore has a value of 0 when the call is made. The
purpose of this test is to measure the cost of seeing if a semaphore
can be &ldquo;taken&rdquo; without blocking. In this case, the answer
would be no.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Trywait &lsqb;1&rsqb; semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_trywait()</function>
call. The semaphore has a value of 1 when the call is made. The
purpose of this test is to measure the cost of seeing if a semaphore
can be &ldquo;taken&rdquo; without blocking. In this case, the answer
would be yes.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Peek semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_peek()</function> call.
The purpose of this test is to measure the cost of obtaining the
current semaphore count value.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Destroy semaphore</term>
                <listitem><para>
This test measures the <function>cyg_semaphore_destroy()</function>
call. The purpose of this test is to measure the cost of deleting a
semaphore from the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Post/Wait semaphore</term>
                <listitem><para>
In this round-trip test, two threads are passing control back and
forth by using a semaphore. The time from when one thread calls
<function>cyg_semaphore_post()</function> until the other thread
completes its <function>cyg_semaphore_wait()</function> is measured.
Note that each iteration of this test will involve a thread switch.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-counters">
            <title>Counters</title>
            <variablelist>
              <varlistentry>
                <term>Create counter</term>
                <listitem><para>
This test measures the <function>cyg_counter_create()</function> call.
A number of separate counters are created. The purpose of this test is
to measure the cost of creating a new counter and introducing it to
the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Get counter value</term>
                <listitem><para>
This test measures the
<function>cyg_counter_current_value()</function> call. The current
value of each counter is obtained.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Set counter value</term>
                <listitem><para>
This test measures the <function>cyg_counter_set_value()</function>
call. Each counter is set to a new value.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tick counter</term>
                <listitem><para>
This test measures the <function>cyg_counter_tick()</function> call.
Each counter is &ldquo;ticked&rdquo; once.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Delete counter</term>
                <listitem><para>
This test measures the <function>cyg_counter_delete()</function> call.
Each counter is deleted from the system. The purpose of this test is
to measure the cost of deleting a counter object.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

          <refsect2 id="kernel-characterization-measure-alarms">
            <title>Alarms</title>
            <variablelist>
              <varlistentry>
                <term>Create alarm</term>
                <listitem><para>
This test measures the <function>cyg_alarm_create()</function> call. A
number of separate alarms are created, all attached to the same
counter object. The purpose of this test is to measure the cost of
creating a new alarm and introducing it to the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Initialize alarm</term>
                <listitem><para>
This test measures the <function>cyg_alarm_initialize()</function>
call. Each alarm is initialized to a small value.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Disable alarm</term>
                <listitem><para>
This test measures the <function>cyg_alarm_disable()</function> call.
Each alarm is explicitly disabled.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Enable alarm</term>
                <listitem><para>
This test measures the <function>cyg_alarm_enable()</function> call.
Each alarm is explicitly enabled.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Delete alarm</term>
                <listitem><para>
This test measures the <function>cyg_alarm_delete()</function> call.
Each alarm is destroyed. The purpose of this test is to measure the
cost of deleting an alarm and removing it from the system.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tick counter &lsqb;1 alarm&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_counter_tick()</function> call. A
counter is created that has a single alarm attached to it. The purpose
of this test is to measure the cost of &ldquo;ticking&rdquo; a counter
when it has a single attached alarm. In this test, the alarm is not
activated (fired).
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tick counter &lsqb;many alarms&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_counter_tick()</function> call. A
counter is created that has multiple alarms attached to it. The
purpose of this test is to measure the cost of &ldquo;ticking&rdquo; a
counter when it has many attached alarms. In this test, the alarms are
not activated (fired).
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tick &amp; fire counter &lsqb;1 alarm&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_counter_tick()</function> call. A
counter is created that has a single alarm attached to it. The purpose
of this test is to measure the cost of &ldquo;ticking&rdquo; a counter
when it has a single attached alarm. In this test, the alarm is
activated (fired). Thus the measured time will include the overhead of
calling the alarm callback function.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Tick &amp; fire counter &lsqb;many alarms&rsqb;</term>
                <listitem><para>
This test measures the <function>cyg_counter_tick()</function> call. A
counter is created that has multiple alarms attached to it. The
purpose of this test is to measure the cost of &ldquo;ticking&rdquo; a
counter when it has many attached alarms. In this test, the alarms are
activated (fired). Thus the measured time will include the overhead of
calling the alarm callback function.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Alarm latency &lsqb;0 threads&rsqb;</term>
                <listitem><para>
This test attempts to measure the latency in calling an alarm callback
function. The time from the clock interrupt until the alarm function
is called is measured. In this test, there are no threads that can be
run, other than the system idle thread, when the clock interrupt
occurs (all threads are suspended).
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Alarm latency &lsqb;2 threads&rsqb;</term>
                <listitem><para>
This test attempts to measure the latency in calling an alarm callback
function. The time from the clock interrupt until the alarm function
is called is measured. In this test, there are exactly two threads
which are running when the clock interrupt occurs. They are simply
passing control back and forth by way of the
<function>cyg_thread_yield()</function> call. The purpose of this test
is to measure the variations in the latency when there are executing
threads.
                </para></listitem>
              </varlistentry>
              <varlistentry>
                <term>Alarm latency &lsqb;many threads&rsqb;</term>
                <listitem><para>
This test attempts to measure the latency in calling an alarm callback
function. The time from the clock interrupt until the alarm function
is called is measured. In this test, there are a number of threads
which are running when the clock interrupt occurs. They are simply
passing control back and forth by way of the
<function>cyg_thread_yield()</function> call. The purpose of this test
is to measure the variations in the latency when there are many
executing threads.
                </para></listitem>
              </varlistentry>
            </variablelist>
          </refsect2>

    </refsect1>

  </refentry>

<!-- }}} -->

</part>