1
  
2
  
3
  
4
  
5
  
6
  
7
  
8
  
9
  
10
  
11
  
12
  
13
  
14
  
15
  
16
  
17
  
18
  
19
  
20
  
21
  
22
  
23
  
24
  
25
  
26
  
27
  
28
  
29
  
30
  
31
  
32
  
33
  
34
  
35
  
36
  
37
  
38
  
39
  
40
  
41
  
42
  
43
  
44
  
45
  
46
  
47
  
48
  
49
  
50
  
51
  
52
  
53
  
54
  
55
  
56
  
57
  
58
  
59
  
60
  
61
  
62
  
63
  
64
  
65
  
66
  
67
  
68
  
69
  
70
  
71
  
72
  
73
  
74
  
75
  
76
  
77
  
78
  
79
  
80
  
81
  
82
  
83
  
84
  
85
  
86
  
87
  
88
  
89
  
90
  
91
  
92
  
93
  
94
  
95
  
96
  
97
  
98
  
99
  
100
  
101
  
102
  
103
  
104
  
105
  
106
  
107
  
108
  
109
  
110
  
111
  
112
  
113
  
114
  
115
  
116
  
117
  
118
  
119
  
120
  
121
  
122
  
123
  
124
  
125
  
126
  
127
  
128
  
129
  
130
  
131
  
132
  
133
  
134
  
135
  
136
  
137
  
138
  
139
  
140
  
141
  
142
  
143
  
144
  
145
  
146
  
147
  
148
  
149
  
150
  
151
  
152
  
153
  
154
  
155
  
156
  
157
  
158
  
159
  
160
  
161
  
162
  
163
  
164
  
165
  
166
  
167
  
168
  
169
  
170
  
171
  
172
  
173
  
174
  
175
  
176
  
177
  
178
  
179
  
180
  
181
  
182
  
183
  
184
  
185
  
186
  
187
  
188
  
189
  
190
  
191
  
192
  
193
  
194
  
195
  
196
  
197
  
198
  
199
  
200
  
201
  
202
  
203
  
204
  
205
  
206
  
207
  
208
  
209
  
210
  
211
  
212
  
213
  
214
  
215
  
216
  
217
  
218
  
219
  
220
  
221
  
222
  
223
  
224
  
225
  
226
  
227
  
228
  
229
  
230
  
231
  
232
  
233
  
234
  
235
  
236
  
237
  
238
  
239
  
240
  
241
  
242
  
243
  
244
  
245
  
246
  
247
  
248
  
249
  
250
  
251
  
252
  
253
  
254
  
255
  
256
  
257
  
258
  
259
  
260
  
261
  
262
  
263
  
264
  
265
  
266
  
267
  
268
  
269
  
270
  
271
  
272
  
273
  
274
  
275
  
276
  
277
  
278
  
279
  
280
  
281
  
282
  
283
  
284
  
285
  
286
  
287
  
288
  
289
  
290
  
291
  
292
  
293
  
294
  
295
  
296
  
297
  
298
  
299
  
300
  
301
  
302
  
303
  
304
  
305
  
306
  
307
  
308
  
309
  
310
  
311
  
312
  
313
  
314
  
315
  
316
  
317
  
318
  
319
  
320
  
321
  
322
  
323
  
324
  
325
  
326
  
327
  
328
  
329
  
330
  
331
  
332
  
333
  
334
  
335
  
336
  
337
  
338
  
339
  
340
  
341
  
342
  
343
  
344
  
345
  
346
  
347
  
348
  
349
  
350
  
351
  
352
  
353
  
354
  
355
  
356
  
357
  
358
  
359
  
360
  
361
  
362
  
363
  
364
  
365
  
366
  
367
  
368
  
369
  
370
  
371
  
372
  
373
  
374
  
375
  
376
  
377
  
378
  
379
  
380
  
381
  
382
  
383
  
384
  
385
  
386
  
387
  
388
  
389
  
390
  
391
  
392
  
393
  
394
  
395
  
396
  
397
  
398
  
399
  
400
  
401
  
402
  
403
  
404
  
405
  
406
  
407
  
408
  
409
  
410
  
411
  
412
  
413
  
414
  
415
  
416
  
417
  
418
  
419
  
420
  
421
  
422
  
423
  
424
  
425
  
426
  
427
  
428
  
429
  
430
  
431
  
432
  
433
  
434
  
435
  
436
  
437
  
438
  
439
  
440
  
441
  
442
  
443
  
444
  
445
  
446
  
447
  
448
  
449
  
450
  
451
  
452
  
453
  
454
  
455
  
456
  
457
  
458
  
459
  
460
  
461
  
462
  
463
  
464
  
465
  
466
  
467
  
468
  
469
  
470
  
471
  
472
  
473
  
474
  
475
  
476
  
477
  
478
  
479
  
480
  
481
  
482
  
483
  
484
  
485
  
486
  
487
  
488
  
489
  
490
  
491
  
492
  
493
  
494
  
495
  
496
  
497
  
498
  
499
  
500
  
501
  
502
  
503
  
504
  
505
  
506
  
507
  
508
  
509
  
510
  
511
  
512
  
513
  
514
  
515
  
516
  
517
  
518
  
519
  
520
  
521
  
522
  
523
  
524
  
525
  
526
  
527
  
528
  
529
  
530
  
531
  
532
  
533
  
534
  
535
  
536
  
537
  
538
  
539
  
540
  
541
  
542
  
543
  
544
  
545
  
546
  
547
  
548
  
549
  
550
  
551
  
552
  
553
  
554
  
555
  
556
  
557
  
558
  
559
  
560
  
561
  
562
  
563
  
564
  
565
  
566
  
567
  
568
  
569
  
570
  
571
  
572
  
573
  
574
  
575
  
576
  
577
  
578
  
579
  
580
  
581
  
582
  
583
  
584
  
585
  
586
  
587
  
588
  
589
  
590
  
591
  
592
  
593
  
594
  
595
  
596
  
597
  
598
  
599
  
600
  
601
  
602
  
603
  
604
  
605
  
606
  
607
  
608
  
609
  
610
  
611
  
612
  
613
  
614
  
615
  
616
  
617
  
618
  
619
  
620
  
621
  
622
  
623
  
624
  
625
  
626
  
627
  
628
  
629
  
630
  
631
  
632
  
633
  
634
  
635
  
636
  
637
  
638
  
639
  
640
  
641
  
642
  
643
  
644
  
645
  
646
  
647
  
648
  
649
  
650
  
651
  
652
  
653
  
654
  
655
  
656
  
657
  
658
  
659
  
660
  
661
  
662
  
663
  
664
  
665
  
666
  
667
  
668
  
669
  
670
  
671
  
672
  
673
  
674
  
675
  
676
  
677
  
678
  
679
  
680
  
681
  
682
  
683
  
684
  
685
  
686
  
687
  
688
  
689
  
690
  
691
  
692
  
693
  
694
  
695
  
696
  
697
  
698
  
699
  
700
  
701
  
702
  
703
  
704
  
705
  
706
  
707
  
708
  
709
  
710
  
711
  
712
  
713
  
714
  
715
  
716
  
717
  
718
  
719
  
720
  
721
  
722
  
723
  
724
  
725
  
726
  
727
  
728
  
729
  
730
  
731
  
732
  
733
  
734
  
735
  
736
  
737
  
738
  
739
  
740
  
741
  
742
  
743
  
744
  
745
  
746
  
747
  
748
  
749
  
750
  
751
  
752
  
753
  
754
  
755
  
756
  
757
  
758
  
759
  
760
  
761
  
762
  
763
  
764
  
765
  
766
  
767
  
768
  
769
  
770
  
771
  
772
  
773
  
774
  
775
  
776
  
777
  
778
  
779
  
780
  
781
  
782
  
783
  
784
  
785
  
786
  
787
  
788
  
789
  
790
  
791
  
792
  
793
  
794
  
795
  
796
  
797
  
798
  
799
  
800
  
801
  
802
  
803
  
804
  
805
  
806
  
807
  
808
  
809
  
810
  
811
  
812
  
813
  
814
  
815
  
816
  
817
  
818
  
819
  
820
  
821
  
822
  
823
  
824
  
825
  
826
  
827
  
828
  
829
  
830
  
831
  
832
  
833
  
834
  
835
  
836
  
837
  
838
  
839
  
840
  
841
  
842
  
843
  
844
  
845
  
846
  
847
  
848
  
849
  
850
  
851
  
852
  
853
  
854
  
855
  
856
  
857
  
858
  
859
  
860
  
861
  
862
  
863
  
864
  
865
  
866
  
867
  
868
  
869
  
870
  
871
  
872
  
873
  
874
  
875
  
876
  
877
  
878
  
879
  
880
  
881
  
882
  
883
  
884
  
885
  
886
  
887
  
888
  
889
  
890
  
891
  
892
  
893
  
894
  
895
  
896
  
897
  
898
  
899
  
900
  
901
  
902
  
903
  
904
  
905
  
906
  
907
  
908
  
909
  
910
  
911
  
912
  
913
  
914
  
915
  
916
  
917
  
918
  
919
  
920
  
921
  
922
  
923
  
924
  
925
  
926
  
927
  
928
  
929
  
930
  
931
  
932
  
933
  
934
  
935
  
936
  
937
  
938
  
939
  
940
  
941
  
942
  
943
  
944
  
945
  
946
  
947
  
948
  
949
  
950
  
951
  
952
  
953
  
954
  
955
  
956
  
957
  
958
  
959
  
960
  
961
  
962
  
963
  
964
  
965
  
966
  
967
  
968
  
969
  
970
  
971
  
972
  
973
  
974
  
975
  
976
  
977
  
978
  
979
  
980
  
981
  
982
  
983
  
984
  
985
  
986
  
987
  
988
  
989
  
990
  
991
  
992
  
993
  
994
  
995
  
996
  
997
  
998
  
999
  
1000
  
1001
  
1002
  
1003
  
1004
  
1005
  
1006
  
1007
  
1008
  
1009
  
1010
  
1011
  
1012
  
1013
  
1014
  
1015
  
1016
  
1017
  
1018
  
1019
  
1020
  
1021
  
1022
  
1023
  
1024
  
1025
  
1026
  
1027
  
1028
  
1029
  
1030
  
1031
  
1032
  
1033
  
1034
  
1035
  
1036
  
1037
  
1038
  
1039
  
1040
  
1041
  
1042
  
1043
  
1044
  
1045
  
1046
  
1047
  
1048
  
1049
  
1050
  
1051
  
1052
  
1053
  
1054
  
1055
  
1056
  
1057
  
1058
  
1059
  
1060
  
1061
  
1062
  
1063
  
1064
  
1065
  
1066
  
1067
  
1068
  
1069
  
1070
  
1071
  
1072
  
1073
  
1074
  
1075
  
1076
  
1077
  
1078
  
1079
  
1080
  
1081
  
1082
  
1083
  
1084
  
1085
  
1086
  
1087
  
1088
  
1089
  
1090
  
1091
  
1092
  
1093
  
1094
  
1095
  
1096
  
1097
  
1098
  
1099
  
1100
  
1101
  
1102
  
1103
  
1104
  
1105
  
1106
  
1107
  
1108
  
1109
  
1110
  
1111
  
1112
  
1113
  
1114
  
1115
  
1116
  
1117
  
1118
  
1119
  
1120
  
1121
  
1122
  
1123
  
1124
  
1125
  
1126
  
1127
  
1128
  
1129
  
1130
  
1131
  
1132
  
1133
  
1134
  
1135
  
1136
  
1137
  
1138
  
1139
  
1140
  
1141
  
1142
  
1143
  
1144
  
1145
  
1146
  
1147
  
1148
  
1149
  
1150
  
1151
  
1152
  
1153
  
1154
  
1155
  
1156
  
1157
  
1158
  
1159
  
1160
  
1161
  
1162
  
1163
  
1164
  
1165
  
1166
  
1167
  
1168
  
1169
  
1170
  
1171
  
1172
  
1173
  
1174
  
1175
  
1176
  
1177
  
1178
  
1179
  
1180
  
1181
  
1182
  
1183
  
1184
  
1185
  
1186
  
1187
  
1188
  
1189
  
1190
  
1191
  
1192
  
1193
  
1194
  
1195
  
1196
  
1197
  
1198
  
1199
  
1200
  
1201
  
1202
  
1203
  
1204
  
1205
  
1206
  
1207
  
1208
  
1209
  
1210
  
1211
  
1212
  
1213
  
1214
  
1215
  
1216
  
1217
  
1218
  
1219
  
1220
  
1221
  
1222
  
1223
  
1224
  
1225
  
1226
  
1227
  
1228
  
1229
  
1230
  
1231
  
1232
  
1233
  
1234
  
1235
  
1236
  
1237
  
1238
  
1239
  
1240
  
1241
  
1242
  
1243
  
1244
  
1245
  
1246
  
1247
  
1248
  
1249
  
1250
  
1251
  
1252
  
1253
  
1254
  
1255
  
1256
  
1257
  
1258
  
1259
  
1260
  
1261
  
1262
  
1263
  
1264
  
1265
  
1266
  
1267
  
1268
  
1269
  
1270
  
1271
  
1272
  
1273
  
1274
  
1275
  
1276
  
1277
  
1278
  
1279
  
1280
  
1281
  
1282
  
1283
  
1284
  
1285
  
1286
  
1287
  
1288
  
1289
  
1290
  
1291
  
1292
  
1293
  
1294
  
1295
  
1296
  
1297
  
1298
  
1299
  
1300
  
1301
  
1302
  
1303
  
1304
  
1305
  
1306
  
1307
  
1308
  
1309
  
1310
  
1311
  
1312
  
1313
  
1314
  
1315
  
1316
  
1317
  
1318
  
1319
  
1320
  
1321
  
1322
  
1323
  
1324
  
1325
  
1326
  
1327
  
1328
  
1329
  
1330
  
1331
  
1332
  
1333
  
1334
  
1335
  
1336
  
1337
  
1338
  
1339
  
1340
  
1341
  
1342
  
1343
  
1344
  
1345
  
1346
  
1347
  
1348
  
1349
  
1350
  
1351
  
1352
  
1353
  
1354
  
1355
  
1356
  
1357
  
1358
  
1359
  
1360
  
1361
  
1362
  
1363
  
1364
  
1365
  
1366
  
1367
  
1368
  
1369
  
1370
  
1371
  
1372
  
1373
  
1374
  
1375
  
1376
  
1377
  
1378
  
1379
  
1380
  
1381
  
1382
  
1383
  
1384
  
1385
  
1386
  
1387
  
1388
  
1389
  
1390
  
1391
  
1392
  
1393
  
1394
  
1395
  
1396
  
1397
  
1398
  
1399
  
1400
  
1401
  
1402
  
1403
  
1404
  
1405
  
1406
  
1407
  
1408
  
1409
  
1410
  
1411
  
1412
  
1413
  
1414
  
1415
  
1416
  
1417
  
1418
  
1419
  
1420
  
1421
  
1422
  
1423
  
1424
  
1425
  
1426
  
1427
  
1428
  
1429
  
1430
  
1431
  
1432
  
1433
  
1434
  
1435
  
1436
  
1437
  
1438
  
1439
  
1440
  
1441
  
1442
  
1443
  
1444
  
1445
  
1446
  
1447
  
1448
  
1449
  
1450
  
1451
  
1452
  
1453
  
1454
  
1455
  
1456
  
1457
  
1458
  
1459
  
1460
  
1461
  
1462
  
1463
  
1464
  
1465
  
1466
  
1467
  
1468
  
1469
  
1470
  
1471
  
1472
  
1473
  
1474
  
1475
  
1476
  
1477
  
1478
  
1479
  
1480
  
1481
  
1482
  
1483
  
1484
  
1485
  
1486
  
1487
  
1488
  
1489
  
1490
  
1491
  
1492
  
1493
  
1494
  
1495
  
1496
  
1497
  
1498
  
1499
  
1500
  
1501
  
1502
  
1503
  
1504
  
1505
  
1506
  
1507
  
1508
  
1509
  
1510
  
1511
  
1512
  
1513
  
1514
  
1515
  
1516
  
1517
  
1518
  
1519
  
1520
  
1521
  
1522
  
1523
  
1524
  
1525
  
1526
  
1527
  
1528
  
1529
  
1530
  
1531
  
1532
  
1533
  
1534
  
1535
  
1536
  
1537
  
1538
  
1539
  
1540
  
1541
  
1542
  
1543
  
1544
  
1545
  
1546
  
1547
  
1548
  
1549
  
1550
  
1551
  
1552
  
1553
  
1554
  
1555
  
1556
  
1557
  
1558
  
1559
  
1560
  
1561
  
1562
  
1563
  
1564
  
1565
  
1566
  
1567
  
1568
  
1569
  
1570
  
1571
  
1572
  
1573
  
1574
  
1575
  
1576
  
1577
  
1578
  
1579
  
1580
  
1581
  
1582
  
1583
  
1584
  
1585
  
1586
  
1587
  
1588
  
1589
  
1590
  
1591
  
1592
  
1593
  
1594
  
1595
  
1596
  
1597
  
1598
  
1599
  
1600
  
1601
  
1602
  
1603
  
1604
  
1605
  
1606
  
1607
  
1608
  
1609
  
1610
  
1611
  
1612
  
1613
  
1614
  
1615
  
1616
  
1617
  
1618
  
1619
  
1620
  
1621
  
1622
  
1623
  
1624
  
1625
  
1626
  
1627
  
1628
  
1629
  
1630
  
1631
  
1632
  
1633
  
1634
  
1635
  
1636
  
1637
  
1638
  
1639
  
1640
  
1641
  
1642
  
1643
  
1644
  
1645
  
1646
  
1647
  
1648
  
1649
  
1650
  
1651
  
1652
  
1653
  
1654
  
1655
  
1656
  
1657
  
1658
  
1659
  
1660
  
1661
  
1662
  
1663
  
1664
  
1665
  
1666
  
1667
  
1668
  
1669
  
1670
  
1671
  
1672
  
1673
  
1674
  
1675
  
1676
  
1677
  
1678
  
1679
  
1680
  
1681
  
1682
  
1683
  
1684
  
1685
  
1686
  
1687
  
1688
  
1689
  
1690
  
1691
  
1692
  
1693
  
1694
  
1695
  
1696
  
1697
  
1698
  
1699
  
1700
  
1701
  
1702
  
1703
  
1704
  
1705
  
1706
  
1707
  
1708
  
1709
  
1710
  
1711
  
1712
  
1713
  
1714
  
1715
  
1716
  
1717
  
1718
  
1719
  
1720
  
1721
  
1722
  
1723
  
1724
  
1725
  
1726
  
1727
  
1728
  
1729
  
1730
  
1731
  
1732
  
1733
  
1734
  
1735
  
1736
  
1737
  
1738
  
1739
  
1740
  
1741
  
1742
  
1743
  
1744
  
1745
  
1746
  
1747
  
1748
  
1749
  
1750
  
1751
  
1752
  
1753
  
1754
  
1755
  
1756
  
1757
  
1758
  
1759
  
1760
  
1761
  
1762
  
1763
  
1764
  
1765
  
1766
  
1767
  
1768
  
1769
  
1770
  
1771
  
1772
  
1773
  
1774
  
1775
  
1776
  
1777
  
1778
  
1779
  
1780
  
1781
  
1782
  
1783
  
1784
  
1785
  
1786
  
1787
  
1788
  
1789
  
1790
  
1791
  
1792
  
1793
  
1794
  
1795
  
1796
  
1797
  
1798
  
1799
  
1800
  
1801
  
1802
  
1803
  
1804
  
1805
  
1806
  
1807
  
1808
  
1809
  
1810
  
1811
  
1812
  
1813
  
1814
  
1815
  
1816
  
1817
  
1818
  
1819
  
1820
  
1821
  
1822
  
1823
  
1824
  
1825
  
1826
  
1827
  
1828
  
1829
  
1830
  
1831
  
1832
  
1833
  
1834
  
1835
  
1836
  
1837
  
1838
  
1839
  
1840
  
1841
  
1842
  
1843
  
1844
  
1845
  
1846
  
1847
  
1848
  
1849
  
1850
  
1851
  
1852
  
1853
  
1854
  
1855
  
1856
  
1857
  
1858
  
1859
  
1860
  
1861
  
1862
  
1863
  
1864
  
1865
  
1866
  
1867
  
1868
  
1869
  
1870
  
1871
  
1872
  
1873
  
1874
  
1875
  
1876
  
1877
  
1878
  
1879
  
1880
  
1881
  
1882
  
1883
  
1884
  
1885
  
1886
  
1887
  
1888
  
1889
  
1890
  
1891
  
1892
  
1893
  
1894
  
1895
  
1896
  
1897
  
1898
  
1899
  
1900
  
1901
  
1902
  
1903
  
1904
  
1905
  
1906
  
1907
  
1908
  
1909
  
1910
  
1911
  
1912
  
1913
  
1914
  
1915
  
1916
  
1917
  
1918
  
1919
  
1920
  
1921
  
1922
  
1923
  
1924
  
1925
  
1926
  
1927
  
1928
  
1929
  
1930
  
1931
  
1932
  
1933
  
1934
  
1935
  
1936
  
1937
  
1938
  
1939
  
1940
  
1941
  
1942
  
1943
  
1944
  
1945
  
1946
  
1947
  
1948
  
1949
  
1950
  
1951
  
1952
  
1953
  
1954
  
1955
  
1956
  
1957
  
1958
  
1959
  
1960
  
1961
  
1962
  
1963
  
1964
  
1965
  
1966
  
1967
  
1968
  
1969
  
1970
  
1971
  
1972
  
1973
  
1974
  
1975
  
1976
  
1977
  
1978
  
1979
  
1980
  
1981
  
1982
  
1983
  
1984
  
1985
  
1986
  
1987
  
1988
  
1989
  
1990
  
1991
  
1992
  
1993
  
1994
  
1995
  
1996
  
1997
  
1998
  
1999
  
2000
  
2001
  
2002
  
2003
  
2004
  
2005
  
2006
  
2007
  
2008
  
2009
  
2010
  
2011
  
2012
  
2013
  
2014
  
2015
  
2016
  
2017
  
2018
  
2019
  
2020
  
2021
  
2022
  
2023
  
2024
  
2025
  
2026
  
2027
  
2028
  
2029
  
2030
  
2031
  
2032
  
2033
  
2034
  
2035
  
2036
  
2037
  
2038
  
2039
  
2040
  
2041
  
2042
  
2043
  
2044
  
2045
  
2046
  
2047
  
2048
  
2049
  
2050
  
2051
  
2052
  
2053
  
2054
  
2055
  
2056
  
2057
  
2058
  
2059
  
2060
  
2061
  
2062
  
2063
  
2064
  
2065
  
2066
  
2067
  
2068
  
2069
  
2070
  
2071
  
2072
  
2073
  
2074
  
2075
  
2076
  
2077
  
2078
  
2079
  
2080
  
2081
  
2082
  
2083
  
2084
  
2085
  
2086
  
2087
  
2088
  
2089
  
2090
  
2091
  
2092
  
2093
  
2094
  
2095
  
2096
  
2097
  
2098
  
2099
  
2100
  
2101
  
2102
  
2103
  
2104
  
2105
  
2106
  
2107
  
2108
  
2109
  
2110
  
2111
  
2112
  
2113
  
2114
  
2115
  
2116
  
2117
  
2118
  
2119
  
2120
  
2121
  
2122
  
2123
  
2124
  
2125
  
2126
  
2127
  
2128
  
2129
  
2130
  
2131
  
2132
  
2133
  
2134
  
2135
  
2136
  
2137
  
2138
  
2139
  
2140
  
2141
  
2142
  
2143
  
2144
  
2145
  
2146
  
2147
  
2148
  
2149
  
2150
  
2151
  
2152
  
2153
  
2154
  
2155
  
2156
  
2157
  
2158
  
2159
  
2160
  
2161
  
2162
  
2163
  
2164
  
2165
  
2166
  
2167
  
2168
  
2169
  
2170
  
2171
  
2172
  
2173
  
2174
  
2175
  
2176
  
2177
  
2178
  
2179
  
2180
  
2181
  
2182
  
2183
  
2184
  
2185
  
2186
  
2187
  
2188
  
2189
  
2190
  
2191
  
2192
  
2193
  
2194
  
2195
  
2196
  
2197
  
2198
  
2199
  
2200
  
2201
  
2202
  
2203
  
2204
  
2205
  
2206
  
2207
  
2208
  
2209
  
2210
  
2211
  
2212
  
2213
  
2214
  
2215
  
2216
  
2217
  
2218
  
2219
  
2220
  
2221
  
2222
  
2223
  
2224
  
2225
  
2226
  
2227
  
2228
  
2229
  
2230
  
2231
  
2232
  
2233
  
2234
  
2235
  
2236
  
2237
  
2238
  
2239
  
2240
  
2241
  
2242
  
2243
  
2244
  
2245
  
2246
  
2247
  
2248
  
2249
  
2250
  
2251
  
2252
  
2253
  
2254
  
2255
  
2256
  
2257
  
2258
  
2259
  
2260
  
2261
  
2262
  
2263
  
2264
  
2265
  
2266
  
2267
  
2268
  
2269
  
2270
  
2271
  
2272
  
2273
  
2274
  
2275
  
2276
  
2277
  
2278
  
2279
  
2280
  
2281
  
2282
  
2283
  
2284
  
2285
  
2286
  
2287
  
2288
  
2289
  
2290
  
2291
  
2292
  
2293
  
2294
  
2295
  
2296
  
2297
  
2298
  
2299
  
2300
  
2301
  
2302
  
2303
  
2304
  
2305
  
2306
  
2307
  
2308
  
2309
  
2310
  
2311
  
2312
  
2313
  
2314
  
2315
  
2316
  
2317
  
2318
  
2319
  
2320
  
2321
  
2322
  
2323
  
2324
  
2325
  
2326
  
2327
  
2328
  
2329
  
2330
  
2331
  
2332
  
2333
  
2334
  
2335
  
2336
  
2337
  
2338
  
2339
  
2340
  
2341
  
2342
  
2343
  
2344
  
2345
  
2346
  
2347
  
2348
  
2349
  
2350
  
2351
  
2352
  
2353
  
2354
  
2355
  
2356
  
2357
  
2358
  
2359
  
2360
  
2361
  
2362
  
2363
  
2364
  
2365
  
2366
  
2367
  
2368
  
2369
  
2370
  
2371
  
2372
  
2373
  
2374
  
2375
  
2376
  
2377
  
2378
  
2379
  
2380
  
2381
  
2382
  
2383
  
2384
  
2385
  
2386
  
2387
  
2388
  
2389
  
2390
  
2391
  
2392
  
2393
  
2394
  
2395
  
2396
  
2397
  
2398
  
2399
  
2400
  
2401
  
2402
  
2403
  
2404
  
2405
  
2406
  
2407
  
2408
  
2409
  
2410
  
2411
  
2412
  
2413
  
2414
  
2415
  
2416
  
2417
  
2418
  
2419
  
2420
  
2421
  
2422
  
2423
  
2424
  
2425
  
2426
  
2427
  
2428
  
2429
  
2430
  
2431
  
2432
  
2433
  
2434
  
2435
  
2436
  
2437
  
2438
  
2439
  
2440
  
2441
  
2442
  
2443
  
2444
  
2445
  
2446
  
2447
  
2448
  
2449
  
2450
  
2451
  
2452
  
2453
  
2454
  
2455
  
2456
  
2457
  
2458
  
2459
  
2460
  
2461
  
2462
  
2463
  
2464
  
2465
  
2466
  
2467
  
2468
  
2469
  
2470
  
2471
  
2472
  
2473
  
2474
  
2475
  
2476
  
2477
  
2478
  
2479
  
2480
  
2481
  
2482
  
2483
  
2484
  
2485
  
2486
  
2487
  
2488
  
2489
  
2490
  
2491
  
2492
  
2493
  
2494
  
2495
  
2496
  
2497
  
2498
  
2499
  
2500
  
2501
  
2502
  
2503
  
2504
  
2505
  
2506
  
2507
  
2508
  
2509
  
2510
  
2511
  
2512
  
2513
  
2514
  
2515
  
2516
  
2517
  
2518
  
2519
  
2520
  
2521
  
2522
  
2523
  
2524
  
2525
  
2526
  
2527
  
2528
  
2529
  
2530
  
2531
  
2532
  
2533
  
2534
  
2535
  
2536
  
2537
  
2538
  
2539
  
2540
  
2541
  
2542
  
2543
  
2544
  
2545
  
2546
  
2547
  
2548
  
2549
  
2550
  
2551
  
2552
  
2553
  
2554
  
2555
  
2556
  
2557
  
2558
  
2559
  
2560
  
2561
  
2562
  
2563
  
2564
  
2565
  
2566
  
2567
  
2568
  
2569
  
2570
  
2571
  
2572
  
2573
  
2574
  
2575
  
2576
  
2577
  
2578
  
2579
  
2580
  
2581
  
2582
  
2583
  
2584
  
2585
  
2586
  
2587
  
2588
  
2589
  
2590
  
2591
  
2592
  
2593
  
2594
  
2595
  
2596
  
2597
  
2598
  
2599
  
2600
  
2601
  
2602
  
2603
  
2604
  
2605
  
2606
  
2607
  
2608
  
2609
  
2610
  
2611
  
2612
  
2613
  
2614
  
2615
  
2616
  
2617
  
2618
  
2619
  
2620
  
2621
  
2622
  
2623
  
2624
  
2625
  
2626
  
2627
  
2628
  
2629
  
2630
  
2631
  
2632
  
2633
  
2634
  
2635
  
2636
  
2637
  
2638
  
2639
  
2640
  
2641
  
2642
  
2643
  
2644
  
2645
  
2646
  
2647
  
2648
  
2649
  
2650
  
2651
  
2652
  
2653
  
2654
  
2655
  
2656
  
2657
  
2658
  
2659
  
2660
  
2661
  
2662
  
2663
  
2664
  
2665
  
2666
  
2667
  
2668
  
2669
  
2670
  
2671
  
2672
  
2673
  
2674
  
2675
  
2676
  
2677
  
2678
  
2679
  
2680
  
2681
  
2682
  
2683
  
2684
  
2685
  
2686
  
2687
  
2688
  
2689
  
2690
  
2691
  
2692
  
2693
  
2694
  
2695
  
2696
  
2697
  
2698
  
2699
  
2700
  
2701
  
2702
  
2703
  
2704
  
2705
  
2706
  
2707
  
2708
  
2709
  
2710
  
2711
  
2712
  
2713
  
2714
  
2715
  
2716
  
2717
  
2718
  
2719
  
2720
  
2721
  
2722
  
2723
  
2724
  
2725
  
2726
  
2727
  
2728
  
2729
  
2730
  
2731
  
2732
  
2733
  
2734
  
2735
  
2736
  
2737
  
2738
  
2739
  
2740
  
2741
  
2742
  
2743
  
2744
  
2745
  
2746
  
2747
  
2748
  
2749
  
2750
  
2751
  
2752
  
2753
  
2754
  
2755
  
2756
  
2757
  
2758
  
2759
  
2760
  
2761
  
2762
  
2763
  
2764
  
2765
  
2766
  
2767
  
2768
  
2769
  
2770
  
2771
  
2772
  
2773
  
2774
  
2775
  
2776
  
2777
  
2778
  
2779
  
2780
  
2781
  
2782
  
2783
  
2784
  
2785
  
2786
  
2787
  
2788
  
2789
  
2790
  
2791
  
2792
  
2793
  
2794
  
2795
  
2796
  
2797
  
2798
  
2799
  
2800
  
2801
  
2802
  
2803
  
2804
  
2805
  
2806
  
2807
  
2808
  
2809
  
2810
  
2811
  
2812
  
2813
  
2814
  
2815
  
2816
  
2817
  
2818
  
2819
  
2820
  
2821
  
2822
  
2823
  
2824
  
2825
  
2826
  
2827
  
2828
  
2829
  
2830
  
2831
  
2832
  
2833
  
2834
  
2835
  
2836
  
2837
  
2838
  
2839
  
2840
  
2841
  
2842
  
2843
  
2844
  
2845
  
2846
  
2847
  
2848
  
2849
  
2850
  
2851
  
2852
  
2853
  
2854
  
2855
  
2856
  
2857
  
2858
  
2859
  
2860
  
2861
  
2862
  
2863
  
2864
  
2865
  
2866
  
2867
  
2868
  
2869
  
2870
  
2871
  
2872
  
2873
  
2874
  
2875
  
2876
  
2877
  
2878
  
2879
  
2880
  
2881
  
2882
  
2883
  
2884
  
2885
  
2886
  
2887
  
2888
  
2889
  
2890
  
2891
  
2892
  
2893
  
2894
  
2895
  
2896
  
2897
  
2898
  
2899
  
2900
  
2901
  
2902
  
2903
  
2904
  
2905
  
2906
  
2907
  
2908
  
2909
  
2910
  
2911
  
2912
  
2913
  
2914
  
2915
  
2916
  
2917
  
2918
  
2919
  
2920
  
2921
  
2922
  
2923
  
2924
  
2925
  
2926
  
2927
  
2928
  
2929
  
2930
  
2931
  
2932
  
2933
  
2934
  
2935
  
2936
  
2937
  
2938
  
2939
  
2940
  
2941
  
2942
  
2943
  
2944
  
2945
  
2946
  
2947
  
2948
  
2949
  
2950
  
2951
  
2952
  
2953
  
2954
  
2955
  
2956
  
2957
  
2958
  
2959
  
2960
  
2961
  
2962
  
2963
  
2964
  
2965
  
2966
  
2967
  
2968
  
2969
  
2970
  
2971
  
2972
  
2973
  
2974
  
2975
  
2976
  
2977
  
2978
  
2979
  
2980
  
2981
  
2982
  
2983
  
2984
  
2985
  
2986
  
2987
  
2988
  
2989
  
2990
  
2991
  
2992
  
2993
  
2994
  
2995
  
2996
  
2997
  
2998
  
2999
  
3000
  
3001
  
3002
  
3003
  
3004
  
3005
  
3006
  
3007
  
3008
  
3009
  
3010
  
3011
  
3012
  
3013
  
3014
  
3015
  
3016
  
3017
  
3018
  
3019
  
3020
  
3021
  
3022
  
3023
  
3024
  
3025
  
3026
  
3027
  
3028
  
3029
  
3030
  
3031
  
3032
  
3033
  
3034
  
3035
  
3036
  
3037
  
3038
  
3039
  
3040
  
3041
  
3042
  
3043
  
3044
  
3045
  
3046
  
3047
  
3048
  
3049
  
3050
  
3051
  
3052
  
3053
  
3054
  
3055
  
3056
  
3057
  
3058
  
3059
  
3060
  
3061
  
3062
  
3063
  
3064
  
3065
  
3066
  
3067
  
3068
  
3069
  
3070
  
3071
  
3072
  
3073
  
3074
  
3075
  
3076
  
3077
  
3078
  
3079
  
3080
  
3081
  
3082
  
3083
  
3084
  
3085
  
3086
  
3087
  
3088
  
3089
  
3090
  
3091
  
3092
  
3093
  
3094
  
3095
  
3096
  
3097
  
3098
  
3099
  
3100
  
3101
  
3102
  
3103
  
3104
  
3105
  
3106
  
3107
  
3108
  
3109
  
3110
  
3111
  
3112
  
3113
  
3114
  
3115
  
3116
  
3117
  
3118
  
3119
  
3120
  
3121
  
3122
  
3123
  
3124
  
3125
  
3126
  
3127
  
3128
  
3129
  
3130
  
3131
  
3132
  
3133
  
3134
  
3135
  
3136
  
3137
  
3138
  
3139
  
3140
  
3141
  
3142
  
3143
  
3144
  
3145
  
3146
  
3147
  
3148
  
3149
  
3150
  
3151
  
3152
  
3153
  
3154
  
3155
  
3156
  
3157
  
3158
  
3159
  
3160
  
3161
  
3162
  
3163
  
3164
  
3165
  
3166
  
3167
  
3168
  
3169
  
3170
  
3171
  
3172
  
3173
  
3174
  
3175
  
3176
  
3177
  
3178
  
3179
  
3180
  
3181
  
3182
  
3183
  
3184
  
3185
  
3186
  
3187
  
3188
  
3189
  
3190
  
3191
  
3192
  
3193
  
3194
  
3195
  
3196
  
3197
  
3198
  
3199
  
3200
  
3201
  
3202
  
3203
  
3204
  
3205
  
3206
  
3207
  
3208
  
3209
  
3210
  
3211
  
3212
  
3213
  
3214
  
3215
  
3216
  
3217
  
3218
  
3219
  
3220
  
3221
  
3222
  
3223
  
3224
  
3225
  
3226
  
3227
  
3228
  
3229
  
3230
  
3231
  
3232
  
3233
  
3234
  
3235
  
3236
  
3237
  
3238
  
3239
  
3240
  
3241
  
3242
  
3243
  
3244
  
3245
  
3246
  
3247
  
3248
  
3249
  
3250
  
3251
  
3252
  
3253
  
3254
  
3255
  
3256
  
3257
  
3258
  
3259
  
3260
  
3261
  
3262
  
3263
  
3264
  
3265
  
3266
  
3267
  
3268
  
3269
  
3270
  
3271
  
3272
  
3273
  
3274
  
3275
  
3276
  
3277
  
3278
  
3279
  
3280
  
3281
  
3282
  
3283
  
3284
  
3285
  
3286
  
3287
  
3288
  
3289
  
3290
  
3291
  
3292
  
3293
  
3294
  
3295
  
3296
  
3297
  
3298
  
3299
  
3300
  
3301
  
3302
  
3303
  
3304
  
3305
  
3306
  
3307
  
3308
  
3309
  
3310
  
3311
  
3312
  
3313
  
3314
  
3315
  
3316
  
3317
  
3318
  
3319
  
3320
  
3321
  
3322
  
3323
  
3324
  
3325
  
3326
  
3327
  
3328
  
3329
  
3330
  
3331
  
3332
  
3333
  
3334
  
3335
  
3336
  
3337
  
3338
  
3339
  
3340
  
3341
  
3342
  
3343
  
3344
  
3345
  
3346
  
3347
  
3348
  
3349
  
3350
  
3351
  
3352
  
3353
  
3354
  
3355
  
3356
  
3357
  
3358
  
3359
  
3360
  
3361
  
3362
  
3363
  
3364
  
3365
  
3366
  
3367
  
3368
  
3369
  
3370
  
3371
  
3372
  
3373
  
3374
  
3375
  
3376
  
3377
  
3378
  
3379
  
3380
  
3381
  
3382
  
3383
  
3384
  
3385
  
3386
  
3387
  
3388
  
3389
  
3390
  
3391
  
3392
  
3393
  
3394
  
3395
  
3396
  
3397
  
3398
  
3399
  
3400
  
3401
  
3402
  
3403
  
3404
  
3405
  
3406
  
3407
  
3408
  
3409
  
3410
  
3411
  
3412
  
3413
  
3414
  
3415
  
3416
  
3417
  
3418
  
3419
  
3420
  
3421
  
3422
  
3423
  
3424
  
3425
  
3426
  
3427
  
3428
  
3429
  
3430
  
3431
  
3432
  
3433
  
3434
  
3435
  
3436
  
3437
  
3438
  
3439
  
3440
  
3441
  
3442
  
3443
  
3444
  
3445
  
3446
  
3447
  
3448
  
3449
  
3450
  
3451
  
3452
  
3453
  
3454
  
3455
  
3456
  
3457
  
3458
  
3459
  
3460
  
3461
  
3462
  
3463
  
3464
  
3465
  
3466
  
3467
  
3468
  
3469
  
3470
  
3471
  
3472
  
3473
  
3474
  
3475
  
3476
  
3477
  
3478
  
3479
  
3480
  
3481
  
3482
  
3483
  
3484
  
3485
  
3486
  
3487
  
3488
  
3489
  
3490
  
3491
  
3492
  
3493
  
3494
  
3495
  
3496
  
3497
  
3498
  
3499
  
3500
  
3501
  
3502
  
3503
  
3504
  
3505
  
3506
  
3507
  
3508
  
3509
  
3510
  
3511
  
3512
  
3513
  
3514
  
3515
  
3516
  
3517
  
3518
  
3519
  
3520
  
3521
  
3522
  
3523
  
3524
  
3525
  
3526
  
3527
  
3528
  
3529
  
3530
  
3531
  
3532
  
3533
  
3534
  
3535
  
3536
  
3537
  
3538
  
3539
  
3540
  
3541
  
3542
  
3543
  
3544
  
3545
  
3546
  
3547
  
3548
  
3549
  
3550
  
3551
  
3552
  
3553
  
3554
  
3555
  
3556
  
3557
  
3558
  
3559
  
3560
  
3561
  
3562
  
3563
  
3564
  
3565
  
3566
  
3567
  
3568
  
3569
  
3570
  
3571
  
3572
  
3573
  
3574
  
3575
  
3576
  
3577
  
3578
  
3579
  
3580
  
3581
  
3582
  
3583
  
3584
  
3585
  
3586
  
3587
  
3588
  
3589
  
3590
  
3591
  
3592
  
3593
  
3594
  
3595
  
3596
  
3597
  
3598
  
3599
  
3600
  
3601
  
3602
  
3603
  
3604
  
3605
  
3606
  
3607
  
3608
  
3609
  
3610
  
3611
  
3612
  
3613
  
3614
  
3615
  
3616
  
3617
  
3618
  
3619
  
3620
  
3621
  
3622
  
3623
  
3624
  
3625
  
3626
  
3627
  
3628
  
3629
  
3630
  
3631
  
3632
  
3633
  
3634
  
3635
  
3636
  
3637
  
3638
  
3639
  
3640
  
3641
  
3642
  
3643
  
3644
  
3645
  
3646
  
3647
  
3648
  
3649
  
3650
  
3651
  
3652
  
3653
  
3654
  
3655
  
3656
  
3657
  
3658
  
3659
  
3660
  
3661
  
3662
  
3663
  
3664
  
3665
  
3666
  
3667
  
3668
  
3669
  
3670
  
3671
  
3672
  
3673
  
3674
  
3675
  
3676
  
3677
  
3678
  
3679
  
3680
  
3681
  
3682
  
3683
  
3684
  
3685
  
3686
  
3687
  
3688
  
3689
  
3690
  
3691
  
3692
  
3693
  
3694
  
3695
  
3696
  
3697
  
3698
  
3699
  
3700
  
3701
  
3702
  
3703
  
3704
  
3705
  
3706
  
3707
  
3708
  
3709
  
3710
  
3711
  
3712
  
3713
  
3714
  
3715
  
3716
  
3717
  
3718
  
3719
  
3720
  
3721
  
3722
  
3723
  
3724
  
3725
  
3726
  
3727
  
3728
  
3729
  
3730
  
3731
  
3732
  
3733
  
3734
  
3735
  
3736
  
3737
  
3738
  
3739
  
3740
  
3741
  
3742
  
3743
  
3744
  
3745
  
3746
  
3747
  
3748
  
3749
  
3750
  
3751
  
3752
  
3753
  
3754
  
3755
  
3756
  
3757
  
3758
  
3759
  
3760
  
3761
  
3762
  
3763
  
3764
  
3765
  
3766
  
3767
  
3768
  
3769
  
3770
  
3771
  
3772
  
3773
  
3774
  
3775
  
3776
  
3777
  
3778
  
3779
  
3780
  
3781
  
3782
  
3783
  
3784
  
3785
  
3786
  
3787
  
3788
  
3789
  
3790
  
3791
  
3792
  
3793
  
3794
  
3795
  
3796
  
3797
  
3798
  
3799
  
3800
  
3801
  
3802
  
3803
  
3804
  
3805
  
3806
  
3807
  
3808
  
3809
  
3810
  
3811
  
3812
  
3813
  
3814
  
3815
  
3816
  
3817
  
3818
  
3819
  
3820
  
3821
  
3822
  
3823
  
3824
  
3825
  
3826
  
3827
  
3828
  
3829
  
3830
  
3831
  
3832
  
3833
  
3834
  
3835
  
3836
  
3837
  
3838
  
3839
  
3840
  
3841
  
3842
  
3843
  
3844
  
3845
  
3846
  
3847
  
3848
  
3849
  
3850
  
3851
  
3852
  
3853
  
3854
  
3855
  
3856
  
3857
  
3858
  
3859
  
3860
  
3861
  
3862
  
3863
  
3864
  
3865
  
3866
  
3867
  
3868
  
3869
  
3870
  
3871
  
3872
  
3873
  
3874
  
3875
  
3876
  
3877
  
3878
  
3879
  
3880
  
3881
  
3882
  
3883
  
3884
  
3885
  
3886
  
3887
  
3888
  
3889
  
3890
  
3891
  
3892
  
3893
  
3894
  
3895
  
3896
  
3897
  
3898
  
3899
  
3900
  
3901
  
3902
  
3903
  
3904
  
3905
  
3906
  
3907
  
3908
  
3909
  
3910
  
3911
  
3912
  
3913
  
3914
  
3915
  
3916
  
3917
  
3918
  
3919
  
3920
  
3921
  
3922
  
3923
  
3924
  
3925
  
3926
  
3927
  
3928
  
3929
  
3930
  
3931
  
3932
  
3933
  
3934
  
3935
  
3936
  
3937
  
3938
  
3939
  
3940
  
3941
  
3942
  
3943
  
3944
  
3945
  
3946
  
3947
  
3948
  
3949
  
3950
  
3951
  
3952
  
3953
  
3954
  
3955
  
3956
  
3957
  
3958
  
3959
  
3960
  
3961
  
3962
  
3963
  
3964
  
3965
  
3966
  
3967
  
3968
  
3969
  
3970
  
3971
  
3972
  
3973
  
3974
  
3975
  
3976
  
3977
  
3978
  
3979
  
3980
  
3981
  
3982
  
3983
  
3984
  
3985
  
3986
  
3987
  
3988
  
3989
  
3990
  
3991
  
3992
  
3993
  
3994
  
3995
  
3996
  
3997
  
3998
  
3999
  
4000
  
4001
  
4002
  
4003
  
4004
  
4005
  
4006
  
4007
  
4008
  
4009
  
4010
  
4011
  
4012
  
4013
  
4014
  
4015
  
4016
  
4017
  
4018
  
4019
  
4020
  
4021
  
4022
  
4023
  
4024
  
4025
  
4026
  
4027
  
4028
  
4029
  
4030
  
4031
  
4032
  
4033
  
4034
  
4035
  
4036
  
4037
  
4038
  
4039
  
4040
  
4041
  
4042
  
4043
  
4044
  
4045
  
4046
  
4047
  
4048
  
4049
  
4050
  
4051
  
4052
  
4053
  
4054
  
4055
  
4056
  
4057
  
4058
  
4059
  
4060
  
4061
  
4062
  
4063
  
4064
  
4065
  
4066
  
4067
  
4068
  
4069
  
4070
  
4071
  
4072
  
4073
  
4074
  
4075
  
4076
  
4077
  
4078
  
4079
  
4080
  
4081
  
4082
  
4083
  
4084
  
4085
  
4086
  
4087
  
4088
  
4089
  
4090
  
4091
  
4092
  
4093
  
4094
  
4095
  
4096
  
4097
  
4098
  
4099
  
4100
  
4101
  
4102
  
4103
  
4104
  
4105
  
4106
  
4107
  
4108
  
4109
  
4110
  
4111
  
4112
  
4113
  
4114
  
4115
  
4116
  
4117
  
4118
  
4119
  
4120
  
4121
  
4122
  
4123
  
4124
  
4125
  
4126
  
4127
  
4128
  
4129
  
4130
  
4131
  
4132
  
4133
  
4134
  
4135
  
4136
  
4137
  
4138
  
4139
  
4140
  
4141
  
4142
  
4143
  
4144
  
4145
  
4146
  
4147
  
4148
  
4149
  
4150
  
4151
  
4152
  
4153
  
4154
  
4155
  
4156
  
4157
  
4158
  
4159
  
4160
  
4161
  
4162
  
4163
  
4164
  
4165
  
4166
  
4167
  
4168
  
4169
  
4170
  
4171
  
4172
  
4173
  
4174
  
4175
  
4176
  
4177
  
4178
  
4179
  
4180
  
4181
  
4182
  
4183
  
4184
  
4185
  
4186
  
4187
  
4188
  
4189
  
4190
  
4191
  
4192
  
4193
  
4194
  
4195
  
4196
  
4197
  
4198
  
4199
  
4200
  
4201
  
4202
  
4203
  
4204
  
4205
  
4206
  
4207
  
4208
  
4209
  
4210
  
4211
  
4212
  
4213
  
4214
  
4215
  
4216
  
4217
  
4218
  
4219
  
4220
  
4221
  
4222
  
4223
  
4224
  
4225
  
4226
  
4227
  
4228
  
4229
  
4230
  
4231
  
4232
  
4233
  
4234
  
4235
  
4236
  
4237
  
4238
  
4239
  
4240
  
4241
  
4242
  
4243
  
4244
  
4245
  
4246
  
4247
  
4248
  
4249
  
4250
  
4251
  
4252
  
4253
  
4254
  
4255
  
4256
  
4257
  
4258
  
4259
  
4260
  
4261
  
4262
  
4263
  
4264
  
4265
  
4266
  
4267
  
4268
  
4269
  
4270
  
4271
  
4272
  
4273
  
4274
  
4275
  
4276
  
4277
  
4278
  
4279
  
4280
  
4281
  
4282
  
4283
  
4284
  
4285
  
4286
  
4287
  
4288
  
4289
  
4290
  
4291
  
4292
  
4293
  
4294
  
4295
  
4296
  
4297
  
4298
  
4299
  
4300
  
4301
  
4302
  
4303
  
4304
  
4305
  
4306
  
4307
  
4308
  
4309
  
4310
  
4311
  
4312
  
4313
  
4314
  
4315
  
4316
  
4317
  
4318
  
4319
  
4320
  
4321
  
4322
  
4323
  
4324
  
4325
  
4326
  
4327
  
4328
  
4329
  
4330
  
4331
  
4332
  
4333
  
4334
  
4335
  
4336
  
4337
  
4338
  
4339
  
4340
  
4341
  
4342
  
4343
  
4344
  
4345
  
4346
  
4347
  
4348
  
4349
  
4350
  
4351
  
4352
  
4353
  
4354
  
4355
  
4356
  
4357
  
4358
  
4359
  
4360
  
4361
  
4362
  
4363
  
4364
  
4365
  
4366
  
4367
  
4368
  
4369
  
4370
  
4371
  
4372
  
4373
  
4374
  
4375
  
4376
  
4377
  
4378
  
4379
  
4380
  
4381
  
4382
  
4383
  
4384
  
4385
  
4386
  
4387
  
4388
  
4389
  
4390
  
4391
  
4392
  
4393
  
4394
  
4395
  
4396
  
4397
  
4398
  
4399
  
4400
  
4401
  
4402
  
4403
  
4404
  
4405
  
4406
  
4407
  
4408
  
4409
  
4410
  
4411
  
4412
  
4413
  
4414
  
4415
  
4416
  
4417
  
4418
  
4419
  
4420
  
4421
  
4422
  
4423
  
4424
  
4425
  
4426
  
4427
  
4428
  
4429
  
4430
  
4431
  
4432
  
4433
  
4434
  
4435
  
4436
  
4437
  
4438
  
4439
  
4440
  
4441
  
4442
  
4443
  
4444
  
4445
  
4446
  
4447
  
4448
  
4449
  
4450
  
4451
  
4452
  
4453
  
4454
  
4455
  
4456
  
4457
  
4458
  
4459
  
4460
  
4461
  
4462
  
4463
  
4464
  
4465
  
4466
  
4467
  
4468
  
4469
  
4470
  
4471
  
4472
  
4473
  
4474
  
4475
  
4476
  
4477
  
4478
  
4479
  
4480
  
4481
  
4482
  
4483
  
4484
  
4485
  
4486
  
4487
  
4488
  
4489
  
4490
  
4491
  
4492
  
4493
  
4494
  
4495
  
4496
  
4497
  
4498
  
4499
  
4500
  
4501
  
4502
  
4503
  
4504
  
4505
  
4506
  
4507
  
4508
  
4509
  
4510
  
4511
  
4512
  
4513
  
4514
  
4515
  
4516
  
4517
  
4518
  
4519
  
4520
  
4521
  
4522
  
4523
  
4524
  
4525
  
4526
  
4527
  
4528
  
4529
  
4530
  
4531
  
4532
  
4533
  
4534
  
4535
  
4536
  
4537
  
4538
  
4539
  
4540
  
4541
  
4542
  
4543
  
4544
  
4545
  
4546
  
4547
  
4548
  
4549
  
4550
  
4551
  
4552
  
4553
  
4554
  
4555
  
4556
  
4557
  
4558
  
4559
  
4560
  
4561
  
4562
  
4563
  
4564
  
4565
  
4566
  
4567
  
4568
  
4569
  
4570
  
4571
  
4572
  
4573
  
4574
  
4575
  
4576
  
4577
  
4578
  
4579
  
4580
  
4581
  
4582
  
4583
  
4584
  
4585
  
4586
  
4587
  
4588
  
4589
  
4590
  
4591
  
4592
  
4593
  
4594
  
4595
  
4596
  
4597
  
4598
  
4599
  
4600
  
4601
  
4602
  
4603
  
4604
  
4605
  
4606
  
4607
  
4608
  
4609
  
4610
  
4611
  
4612
  
4613
  
4614
  
4615
  
4616
  
4617
  
4618
  
4619
  
4620
  
4621
  
4622
  
4623
  
4624
  
4625
  
4626
  
4627
  
4628
  
4629
  
4630
  
4631
  
4632
  
4633
  
4634
  
4635
  
4636
  
4637
  
4638
  
4639
  
4640
  
4641
  
4642
  
4643
  
4644
  
4645
  
4646
  
4647
  
4648
  
4649
  
4650
  
4651
  
4652
  
4653
  
4654
  
4655
  
4656
  
4657
  
4658
  
4659
  
4660
  
4661
  
4662
  
4663
  
4664
  
4665
  
4666
  
4667
  
4668
  
4669
  
4670
  
4671
  
4672
  
4673
  
4674
  
4675
  
4676
  
4677
  
4678
  
4679
  
4680
  
4681
  
4682
  
4683
  
4684
  
4685
  
4686
  
4687
  
4688
  
4689
  
4690
  
4691
  
4692
  
4693
  
4694
  
4695
  
4696
  
4697
  
4698
  
4699
  
4700
  
4701
  
4702
  
4703
  
4704
  
4705
  
4706
  
4707
  
4708
  
4709
  
4710
  
4711
  
4712
  
4713
  
4714
  
4715
  
4716
  
4717
  
4718
  
4719
  
4720
  
4721
  
4722
  
4723
  
4724
  
4725
  
4726
  
4727
  
4728
  
4729
  
4730
  
4731
  
4732
  
4733
  
4734
  
4735
  
4736
  
4737
  
4738
  
4739
  
4740
  
4741
  
4742
  
4743
  
4744
  
4745
  
4746
  
4747
  
4748
  
4749
  
4750
  
4751
  
4752
  
4753
  
4754
  
4755
  
4756
  
4757
  
4758
  
4759
  
4760
  
4761
  
4762
  
4763
  
4764
  
4765
  
4766
  
4767
  
4768
  
4769
  
4770
  
4771
  
4772
  
4773
  
4774
  
4775
  
4776
  
4777
  
4778
  
4779
  
4780
  
4781
  
4782
  
4783
  
4784
  
4785
  
4786
  
4787
  
4788
  
4789
  
4790
  
4791
  
4792
  
4793
  
4794
  
4795
  
4796
  
4797
  
4798
  
4799
  
4800
  
4801
  
4802
  
4803
  
4804
  
4805
  
4806
  
4807
  
4808
  
4809
  
4810
  
4811
  
4812
  
4813
  
4814
  
4815
  
4816
  
4817
  
4818
  
4819
  
4820
  
4821
  
4822
  
4823
  
4824
  
4825
  
4826
  
4827
  
4828
  
4829
  
4830
  
4831
  
4832
  
4833
  
4834
  
4835
  
4836
  
4837
  
4838
  
4839
  
4840
  
4841
  
4842
  
4843
  
4844
  
4845
  
4846
  
4847
  
4848
  
4849
  
4850
  
4851
  
4852
  
4853
  
4854
  
4855
  
4856
  
4857
  
4858
  
4859
  
4860
  
4861
  
4862
  
4863
  
4864
  
4865
  
4866
  
4867
  
4868
  
4869
  
4870
  
4871
  
4872
  
4873
  
4874
  
4875
  
4876
  
4877
  
4878
  
4879
  
4880
  
4881
  
4882
  
4883
  
4884
  
4885
  
4886
  
4887
  
4888
  
4889
  
4890
  
4891
  
4892
  
4893
  
4894
  
4895
  
4896
  
4897
  
4898
  
4899
  
4900
  
4901
  
4902
  
4903
  
4904
  
4905
  
4906
  
4907
  
4908
  
4909
  
4910
  
4911
  
4912
  
4913
  
4914
  
4915
  
4916
  
4917
  
4918
  
4919
  
4920
  
4921
  
4922
  
4923
  
4924
  
4925
  
4926
  
4927
  
4928
  
4929
  
4930
  
4931
  
4932
  
4933
  
4934
  
4935
  
4936
  
4937
  
4938
  
4939
  
4940
  
4941
  
4942
  
4943
  
4944
  
4945
  
4946
  
4947
  
4948
  
4949
  
4950
  
4951
  
4952
  
4953
  
4954
  
4955
  
4956
  
4957
  
4958
  
4959
  
4960
  
4961
  
4962
  
4963
  
4964
  
4965
  
4966
  
4967
  
4968
  
4969
  
4970
  
4971
  
4972
  
4973
  
4974
  
4975
  
4976
  
4977
  
4978
  
4979
  
4980
  
4981
  
4982
  
4983
  
4984
  
4985
  
4986
  
4987
  
4988
  
4989
  
4990
  
4991
  
4992
  
4993
  
4994
  
4995
  
4996
  
4997
  
4998
  
4999
  
5000
  
5001
  
5002
  
5003
  
5004
  
5005
  
5006
  
5007
  
5008
  
5009
  
5010
  
5011
  
5012
  
5013
  
5014
  
5015
  
5016
  
5017
  
5018
  
5019
  
5020
  
5021
  
5022
  
5023
  
5024
  
5025
  
5026
  
5027
  
5028
  
5029
  
5030
  
5031
  
5032
  
5033
  
5034
  
5035
  
5036
  
5037
  
5038
  
5039
  
5040
  
5041
  
5042
  
5043
  
5044
  
5045
  
5046
  
5047
  
5048
  
5049
  
5050
  
5051
  
5052
  
5053
  
5054
  
5055
  
5056
  
5057
  
5058
  
5059
  
5060
  
5061
  
5062
  
5063
  
5064
  
5065
  
5066
  
5067
  
5068
  
5069
  
5070
  
5071
  
5072
  
5073
  
5074
  
5075
  
5076
  
5077
  
5078
  
5079
  
5080
  
5081
  
5082
  
5083
  
5084
  
5085
  
5086
  
5087
  
5088
  
5089
  
5090
  
5091
  
5092
  
5093
  
5094
  
5095
  
5096
  
5097
  
5098
  
5099
  
5100
  
5101
  
5102
  
5103
  
5104
  
5105
  
5106
  
5107
  
5108
  
5109
  
5110
  
5111
  
5112
  
5113
  
5114
  
5115
  
5116
  
5117
  
5118
  
5119
  
5120
  
5121
  
5122
  
5123
  
5124
  
5125
  
5126
  
5127
  
5128
  
5129
  
5130
  
5131
  
5132
  
5133
  
5134
  
5135
  
5136
  
5137
  
5138
  
5139
  
5140
  
5141
  
5142
  
5143
  
5144
  
5145
  
5146
  
5147
  
5148
  
5149
  
5150
  
5151
  
5152
  
5153
  
5154
  
5155
  
5156
  
5157
  
5158
  
5159
  
5160
  
5161
  
5162
  
5163
  
5164
  
5165
  
5166
  
5167
  
5168
  
5169
  
5170
  
5171
  
5172
  
5173
  
5174
  
5175
  
5176
  
5177
  
5178
  
5179
  
5180
  
5181
  
5182
  
5183
  
5184
  
5185
  
5186
  
5187
  
5188
  
5189
  
5190
  
5191
  
5192
  
5193
  
5194
  
5195
  
5196
  
5197
  
5198
  
5199
  
5200
  
5201
  
5202
  
5203
  
5204
  
5205
  
5206
  
5207
  
5208
  
5209
  
5210
  
5211
  
5212
  
5213
  
5214
  
5215
  
5216
  
5217
  
5218
  
5219
  
5220
  
5221
  
5222
  
5223
  
5224
  
5225
  
5226
  
5227
  
5228
  
5229
  
5230
  
5231
  
5232
  
5233
  
5234
  
5235
  
5236
  
5237
  
5238
  
5239
  
5240
  
5241
  
5242
  
5243
  
5244
  
5245
  
5246
  
5247
  
5248
  
5249
  
5250
  
5251
  
5252
  
5253
  
5254
  
5255
  
5256
  
5257
  
5258
  
5259
  
5260
  
5261
  
5262
  
5263
  
5264
  
5265
  
5266
  
5267
  
5268
  
5269
  
5270
  
5271
  
5272
  
5273
  
5274
  
5275
  
5276
  
5277
  
5278
  
5279
  
5280
  
5281
  
5282
  
5283
  
5284
  
5285
  
5286
  
5287
  
5288
  
5289
  
5290
  
5291
  
5292
  
5293
  
5294
  
5295
  
5296
  
5297
  
5298
  
5299
  
5300
  
5301
  
5302
  
5303
  
5304
  
5305
  
5306
  
5307
  
5308
  
5309
  
5310
  
5311
  
5312
  
5313
  
5314
  
5315
  
5316
  
5317
  
5318
  
5319
  
5320
  
5321
  
5322
  
5323
  
5324
  
5325
  
5326
  
5327
  
5328
  
5329
  
5330
  
5331
  
5332
  
5333
  
5334
  
5335
  
5336
  
5337
  
5338
  
5339
  
5340
  
5341
  
5342
  
5343
  
5344
  
5345
  
5346
  
5347
  
5348
  
5349
  
5350
  
5351
  
5352
  
5353
  
5354
  
5355
  
5356
  
5357
  
5358
  
5359
  
5360
  
5361
  
5362
  
5363
  
5364
  
5365
  
5366
  
5367
  
5368
  
5369
  
5370
  
5371
  
5372
  
5373
  
5374
  
5375
  
5376
  
5377
  
5378
  
5379
  
5380
  
5381
  
5382
  
5383
  
5384
  
5385
  
5386
  
5387
  
5388
  
5389
  
5390
  
5391
  
5392
  
5393
  
5394
  
5395
  
5396
  
5397
  
5398
  
5399
  
5400
  
5401
  
5402
  
5403
  
5404
  
5405
  
5406
  
5407
  
5408
  
5409
  
5410
  
5411
  
5412
  
5413
  
5414
  
5415
  
5416
  
5417
  
5418
  
5419
  
5420
  
5421
  
5422
  
5423
  
5424
  
5425
  
5426
  
5427
  
5428
  
5429
  
5430
  
5431
  
5432
  
5433
  
5434
  
5435
  
5436
  
5437
  
5438
  
5439
  
5440
  
5441
  
5442
  
5443
  
5444
  
5445
  
5446
  
5447
  
5448
  
5449
  
5450
  
5451
  
5452
  
5453
  
5454
  
5455
  
5456
  
5457
  
5458
  
5459
  
5460
  
5461
  
5462
  
5463
  
5464
  
5465
  
5466
  
5467
  
5468
  
5469
  
5470
  
5471
  
5472
  
5473
  
5474
  
5475
  
5476
  
5477
  
5478
  
5479
  
5480
  
5481
  
5482
  
5483
  
5484
  
5485
  
5486
  
5487
  
5488
  
5489
  
5490
  
5491
  
5492
  
5493
  
5494
  
5495
  
5496
  
5497
  
5498
  
5499
  
5500
  
5501
  
5502
  
5503
  
5504
  
5505
  
5506
  
5507
  
5508
  
5509
  
5510
  
5511
  
5512
  
5513
  
5514
  
5515
  
5516
  
5517
  
5518
  
5519
  
5520
  
5521
  
5522
  
5523
  
5524
  
5525
  
5526
  
5527
  
5528
  
5529
  
5530
  
5531
  
5532
  
5533
  
5534
  
5535
  
5536
  
5537
  
5538
  
5539
  
5540
  
5541
  
5542
  
5543
  
5544
  
5545
  
5546
  
5547
  
5548
  
5549
  
5550
  
5551
  
5552
  
5553
  
5554
  
5555
  
5556
  
5557
  
5558
  
5559
  
5560
  
5561
  
5562
  
5563
  
5564
  
5565
  
5566
  
5567
  
5568
  
5569
  
5570
  
5571
  
5572
  
5573
  
5574
  
5575
  
5576
  
5577
  
5578
  
5579
  
5580
  
5581
  
5582
  
5583
  
5584
  
5585
  
5586
  
5587
  
5588
  
5589
  
5590
  
5591
  
5592
  
5593
  
5594
  
5595
  
5596
  
5597
  
5598
  
5599
  
5600
  
5601
  
5602
  
5603
  
5604
  
5605
  
5606
  
5607
  
5608
  
5609
  
5610
  
5611
  
5612
  
5613
  
5614
  
5615
  
5616
  
5617
  
5618
  
5619
  
5620
  
5621
  
5622
  
5623
  
5624
  
5625
  
5626
  
5627
  
5628
  
5629
  
5630
  
5631
  
5632
  
5633
  
5634
  
5635
  
5636
  
5637
  
5638
  
5639
  
5640
  
5641
  
5642
  
5643
  
5644
  
5645
  
5646
  
5647
  
5648
  
5649
  
5650
  
5651
  
5652
  
5653
  
5654
  
5655
  
5656
  
5657
  
5658
  
5659
  
5660
  
5661
  
5662
  
5663
  
5664
  
5665
  
5666
  
5667
  
5668
  
5669
  
5670
  
5671
  
5672
  
5673
  
5674
  
5675
  
5676
  
5677
  
5678
  
5679
  
5680
  
5681
  
5682
  
5683
  
5684
  
5685
  
5686
  
5687
  
5688
  
5689
  
5690
  
5691
  
5692
  
5693
  
5694
  
5695
  
5696
  
5697
  
5698
  
5699
  
5700
  
5701
  
5702
  
5703
  
5704
  
5705
  
5706
  
5707
  
5708
  
5709
  
5710
  
5711
  
5712
  
5713
  
5714
  
5715
  
5716
  
5717
  
5718
  
5719
  
5720
  
5721
  
5722
  
5723
  
5724
  
5725
  
5726
  
5727
  
5728
  
5729
  
5730
  
5731
  
5732
  
5733
  
5734
  
5735
  
5736
  
5737
  
5738
  
5739
  
5740
  
5741
  
5742
  
5743
  
5744
  
5745
  
5746
  
5747
  
5748
  
5749
  
5750
  
5751
  
5752
  
5753
  
5754
  
5755
  
5756
  
5757
  
5758
  
5759
  
5760
  
5761
  
5762
  
5763
  
5764
  
5765
  
5766
  
5767
  
5768
  
5769
  
5770
  
5771
  
5772
  
5773
  
5774
  
5775
  
5776
  
5777
  
5778
  
5779
  
5780
  
5781
  
5782
  
5783
  
5784
  
5785
  
5786
  
5787
  
5788
  
5789
  
5790
  
5791
  
5792
  
5793
  
5794
  
5795
  
5796
  
5797
  
5798
  
5799
  
5800
  
5801
  
5802
  
5803
  
5804
  
5805
  
5806
  
5807
  
5808
  
5809
  
5810
  
5811
  
5812
  
5813
  
5814
  
5815
  
5816
  
5817
  
5818
  
5819
  
5820
  
5821
  
5822
  
5823
  
5824
  
5825
  
5826
  
5827
  
5828
  
5829
  
5830
  
5831
  
5832
  
5833
  
5834
  
5835
  
5836
  
5837
  
5838
  
5839
  
5840
  
5841
  
5842
  
5843
  
5844
  
5845
  
5846
  
5847
  
5848
  
5849
  
5850
  
5851
  
5852
  
5853
  
5854
  
5855
  
5856
  
5857
  
5858
  
5859
  
5860
  
5861
  
5862
  
5863
  
5864
  
5865
  
5866
  
5867
  
5868
  
5869
  
5870
  
5871
  
5872
  
5873
  
5874
  
5875
  
5876
  
5877
  
5878
  
5879
  
5880
  
5881
  
5882
  
5883
  
5884
  
5885
  
5886
  
5887
  
5888
  
5889
  
5890
  
5891
  
5892
  
5893
  
5894
  
5895
  
5896
  
5897
  
5898
  
5899
  
5900
  
5901
  
5902
  
5903
  
5904
  
5905
  
5906
  
5907
  
5908
  
5909
  
5910
  
5911
  
5912
  
5913
  
5914
  
5915
  
5916
  
5917
  
5918
  
5919
  
5920
  
5921
  
5922
  
5923
  
5924
  
5925
  
5926
  
5927
  
5928
  
5929
  
5930
  
5931
  
5932
  
5933
  
5934
  
5935
  
5936
  
5937
  
5938
  
5939
  
5940
  
5941
  
5942
  
5943
  
5944
  
5945
  
5946
  
5947
  
5948
  
5949
  
5950
  
5951
  
5952
  
5953
  
5954
  
5955
  
5956
  
5957
  
5958
  
5959
  
5960
  
5961
  
5962
  
5963
  
5964
  
5965
  
5966
  
5967
  
5968
  
5969
  
5970
  
5971
  
5972
  
5973
  
5974
  
5975
  
5976
  
5977
  
5978
  
5979
  
5980
  
5981
  
5982
  
5983
  
5984
  
5985
  
5986
  
5987
  
5988
  
5989
  
5990
  
5991
  
5992
  
5993
  
5994
  
5995
  
5996
  
5997
  
5998
  
5999
  
6000
  
6001
  
6002
  
6003
  
6004
  
6005
  
6006
  
6007
  
6008
  
6009
  
6010
  
6011
  
6012
  
6013
  
6014
  
6015
  
6016
  
6017
  
6018
  
6019
  
6020
  
6021
  
6022
  
6023
  
6024
  
6025
  
6026
  
6027
  
6028
  
6029
  
6030
  
6031
  
6032
  
6033
  
6034
  
6035
  
6036
  
6037
  
6038
  
6039
  
6040
  
6041
  
6042
  
6043
  
6044
  
6045
  
6046
  
6047
  
6048
  
6049
  
6050
  
6051
  
6052
  
6053
  
6054
  
6055
  
6056
  
6057
  
6058
  
6059
  
6060
  
6061
  
6062
  
6063
  
6064
  
6065
  
6066
  
6067
  
6068
  
6069
  
6070
  
6071
  
6072
  
6073
  
6074
  
6075
  
6076
  
6077
  
6078
  
6079
  
/* -*- c -*- 
|| This file is part of Pike. For copyright information see COPYRIGHT. 
|| Pike is distributed under GPL, LGPL and MPL. See the file COPYING 
|| for more information. 
*/ 
 
/* 
 * Backend object. 
 */ 
 
#include "global.h" 
#include "fdlib.h" 
#include "backend.h" 
#include "time_stuff.h" 
#include <errno.h> 
#ifdef HAVE_SYS_PARAM_H 
#include <sys/param.h> 
#endif 
#include "interpret.h" 
#include "object.h" 
#include "pike_error.h" 
#include "fd_control.h" 
#include "main.h" 
#include "callback.h" 
#include "threads.h" 
#include "array.h" 
#include <math.h> 
#include "interpret.h" 
#include "stuff.h" 
#include "bignum.h" 
#include "builtin_functions.h" 
#include "mapping.h" 
#include "svalue.h" 
#include "gc.h" 
#include "module_support.h" 
#include "block_allocator.h" 
#include "sprintf.h" 
 
/* 
 * Things to do 
 * 
 * o what happens to callbacks on destruct? 
 * 
 *   They will just cease to generate any events. If the callback 
 *   container object uses the old callback interface and has added an 
 *   extra ref to itself to account for the callback connection it 
 *   will become garbage. The new interface fixes this. /mast 
 * 
 * o automatic callback assignment based on current thread 
 * 
 *   Sounds very odd to me. /mast 
 */ 
 
/* For select */ 
#ifdef HAVE_SYS_SELECT_H 
#include <sys/select.h> 
#else 
/* BeOS socket (select etc) stuff */ 
#ifdef HAVE_NET_SOCKET_H 
#include <net/socket.h> 
#endif 
#endif 
#include <sys/stat.h> 
 
/* For poll and /dev/poll and epoll */ 
#ifdef HAVE_POLL_H 
#include <poll.h> 
#endif /* HAVE_POLL_H */ 
#ifdef HAVE_SYS_POLL_H 
#include <sys/poll.h> 
#endif /* HAVE_SYS_POLL_H */ 
 
/* For /dev/poll */ 
#ifdef HAVE_SYS_DEVPOLL_H 
#include <sys/devpoll.h> 
#endif /* HAVE_SYS_DEVPOLL_H */ 
 
/* For epoll */ 
#ifdef HAVE_SYS_EPOLL_H 
#include <sys/epoll.h> 
#endif /* HAVE_SYS_EPOLL_H */ 
 
/* For kqueue. */ 
#ifdef HAVE_SYS_EVENT_H 
#include <sys/event.h> 
#endif /* HAVE_SYS_EVENT_H */ 
 
/* for kqueue + CFRunLoop */ 
#ifdef HAVE_CORESERVICES_CORESERVICES_H 
#include <CoreServices/CoreServices.h> 
#endif /* HAVE_CORESERVICES_CORESERVICES_H */ 
 
/* The following are used on Linux'es that have an old libc. */ 
#ifdef HAVE_SYSCALL_H 
#include <syscall.h> 
#elif defined(HAVE_SYS_SYSCALL_H) 
#include <sys/syscall.h> 
#endif /* HAVE_SYSCALL_H || HAVE_SYS_SYSCALL_H */ 
 
#if defined(BACKEND_USES_POLL_DEVICE) || defined(BACKEND_USES_KQUEUE) 
struct program * PollDeviceBackend_program; 
#endif /* BACKEND_USES_POLL_DEVICE || BACKEND_USES_KQUEUE */ 
 
/* 
 * Debugging and tracing. 
 */ 
 
/* #define POLL_DEBUG */ 
/* #define CALL_OUT_DEBUG */ 
 
#ifdef PIKE_EXTRA_DEBUG 
/* #define POLL_DEBUG */ 
/* #define CALL_OUT_DEBUG */ 
#endif 
 
#ifdef POLL_DEBUG 
#define PDWERR(...) fprintf(stderr,__VA_ARGS__) 
#define PDUNUSED(x) x 
#else /* !POLL_DEBUG */ 
#define PDWERR(...) 
#define PDUNUSED(x) UNUSED(x) 
#endif /* POLL_DEBUG */ 
 
#ifdef CALL_OUT_DEBUG 
#define IF_CO(X) X 
#define COWERR(...) fprintf(stderr,__VA_ARGS__) 
#else 
#define IF_CO(X) 
#define COWERR(...) 
#endif 
 
#ifdef PIKE_THREADS 
#define THR_NO (int) PTR_TO_INT (THREAD_T_TO_PTR (th_self())) 
#else 
#define THR_NO getpid() 
#endif 
 
/* Declarations for the legacy backend interface stuff. */ 
 
static struct compat_cb_box * alloc_compat_cb_box(void); 
static void really_free_compat_cb_box(struct compat_cb_box * b); 
static int compat_box_dispatcher (struct fd_callback_box *box, int event); 
 
#ifdef BACKEND_USES_CFRUNLOOP 
/* Used by CoreFoundation backend to get around lack of PDB structures 
 * early in the CMOD. 
 */ 
static void check_set_timer(struct timeval when); 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
/* CALL OUT STUFF */ 
 
#ifdef PIKE_DEBUG 
#define MESS_UP_BLOCK(X) do {\ 
 (X)->next_arr=(struct Backend_CallOut_struct *)(ptrdiff_t)-1; \ 
 (X)->next_fun=(struct Backend_CallOut_struct *)(ptrdiff_t)-1; \ 
 (X)->prev_arr=(struct Backend_CallOut_struct **)(ptrdiff_t)-1; \ 
 (X)->prev_fun=(struct Backend_CallOut_struct **)(ptrdiff_t)-1; \ 
 (X)->pos=-1; \ 
 } while(0) 
#else 
#define MESS_UP_BLOCK(X) 
#endif 
 
#undef EXIT_BLOCK 
#define EXIT_BLOCK(X) do { \ 
  *(X->prev_arr)=X->next_arr; \ 
  if(X->next_arr) X->next_arr->prev_arr=X->prev_arr; \ 
  *(X->prev_fun)=X->next_fun; \ 
  if(X->next_fun) X->next_fun->prev_fun=X->prev_fun; \ 
  MESS_UP_BLOCK(X); \ 
  } while(0) 
 
struct hash_ent 
{ 
  struct Backend_CallOut_struct *arr; 
  struct Backend_CallOut_struct *fun; 
}; 
 
 
 
#define DEFAULT_CMOD_STORAGE 
 
DECLARATIONS 
 
 
struct callback_list do_debug_callbacks; 
struct timeval current_time; 
int current_time_invalid = 1; 
 
/* 
 * Stuff to map fds to the proper Backend 
 */ 
static struct Backend_struct **fd_map=0; 
static int fd_map_size=0; 
static struct object *default_backend_obj = NULL; 
 
/** 
 * The default backend object. 
 */ 
PMOD_EXPORT struct Backend_struct *default_backend = NULL; 
 
#ifdef DO_PIKE_CLEANUP 
static int num_active_backends = 0; 
#endif 
 
#if defined(BACKEND_USES_CFRUNLOOP) 
static void noteEvents(CFFileDescriptorRef fdref, CFOptionFlags callBackTypes, 
                       void *info); 
void cfObserverCallback(CFRunLoopObserverRef observer, 
                        CFRunLoopActivity activity, void* info); 
void cfTimerCallback(CFRunLoopTimerRef timer, void * info); 
 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
static int backend_do_call_outs(struct Backend_struct *me); 
#ifdef PIKE_DEBUG 
static void backend_verify_call_outs(struct Backend_struct *me); 
#endif 
#ifdef DO_PIKE_CLEANUP 
static void backend_cleanup(void); 
#endif 
 
struct Backend_struct *get_backend_for_fd(int fd)
{
  /* Look up the backend registered for fd.  Returns NULL for fds that
   * are out of range or have no registered backend. */
  if ((fd >= 0) && (fd < fd_map_size))
    return fd_map[fd];
  return 0;
}
 
/* Register (or with b == NULL, unregister) the backend handling fd in
 * the global fd_map.  The map grows on demand by doubling from a
 * minimum of 64 entries; allocation failure is fatal. */
static void low_set_backend_for_fd(int fd, struct Backend_struct *b)
{
#ifdef PIKE_DEBUG
  if(fd<0) Pike_fatal("set_backend_for(%d)\n",fd);
#endif
  if (!b) {
    /* Unregister the fd.  Out-of-range fds were never registered,
     * so there is nothing to clear and no reason to grow the map. */
    if (fd < fd_map_size) {
      fd_map[fd] = NULL;
    }
    return;
  }
  if(fd >= fd_map_size)
  {
    int old=fd_map_size;
    if(!fd_map_size) fd_map_size=64;
    while(fd >= fd_map_size) fd_map_size*=2;
    if (fd_map) {
      fd_map = realloc(fd_map, sizeof(struct Backend_struct *) * fd_map_size);
      if (fd_map) {
        /* Zero the newly added tail so stale garbage is never read as
         * a backend pointer. */
        memset(fd_map+old, 0,
               sizeof(struct Backend_struct *) * (fd_map_size-old));
      }
    } else {
      /* calloc(nmemb, size): the element count goes first, which is
       * also what lets calloc perform its overflow check on the
       * multiplication.  (The result is already zero-filled.) */
      fd_map = calloc(fd_map_size, sizeof(struct Backend_struct *));
    }
    if(!fd_map)
      Pike_fatal("Out of memory in backend:low_set_backend_for_fd.\n"
                 "Tried to allocate %"PRINTSIZET"d bytes.\n",
                 sizeof(struct Backend_struct *) * fd_map_size);
  }
  fd_map[fd]=b;
}
 
struct Backend_struct *really_get_backend_for_fd(int fd)
{
  /* Like get_backend_for_fd(), but when fd has no backend yet, bind
   * it to the default backend and return that. */
  struct Backend_struct *found = get_backend_for_fd(fd);
  if (found)
    return found;

#ifdef PIKE_DEBUG
  if (!default_backend)
    Pike_fatal("No backend!\n");
#endif
  low_set_backend_for_fd(fd, default_backend);
  return default_backend;
}
 
/*! @module Pike 
 */ 
 
/*! @class __Backend 
 *!   Base class for the various backend implementations. 
 *! 
 *!   Implements callback registration functions and defines the 
 *!   main backend APIs. 
 */ 
PIKECLASS Backend 
{ 
  /* Provide a unique count to be able to tell backends apart with _sprintf. */ 
  static int unused_id = 0; 
  CVAR int id; 
 
  CVAR struct timeval next_timeout; 
 
  /* 
   * Backend callbacks 
   */ 
  CVAR struct callback_list backend_callbacks; 
 
  /*! @decl function(Backend:void) before_callback 
   *! @decl function(Backend:void) after_callback 
   *! 
   *! If set, these are called just before and after the backend waits 
   *! for an event. 
   *! 
   *! If an error is thrown from these callbacks then it is reported 
   *! using @expr{master()->handle_error()@} - it doesn't interrupt 
   *! the operation of the backend. 
   */ 
  /* before_callback is not strictly necessary since one can just as 
   * well run it before `(), but it's convenient to have a standard 
   * place to hook in a function. */ 
  PIKEVAR function(Backend:void) before_callback; 
  PIKEVAR function(Backend:void) after_callback; 
 
#ifdef PIKE_THREADS 
  /* Thread currently executing in the backend. */ 
  CVAR struct thread_state *exec_thread; 
 
  /* Signal to wake up other threads waiting on the backend. */ 
  CVAR COND_T backend_signal; 
 
  /* Incremented when a backend round has called any callbacks. */ 
  CVAR int done_counter; 
#else 
  CVAR int exec_thread;         /* 1 if inside the backend. */ 
#endif 
 
  /* 
   * PIPE for waking up 
   */ 
  CVAR int wakeup_pipe_send_fd; 
  CVAR struct fd_callback_box wakeup_cb_box; 
  CVAR int may_need_wakeup; 
 
  /* 
   * FD callback data 
   */ 
 
  /* An array indexed on fd with arbitrary upper and lower bounds. A 
   * lower bound is used to cut down the size for backends that 
   * handle a single or only a few fd's. */ 
  CVAR struct fd_callback_box **fd_boxes; 
  CVAR int fd_boxes_start, fd_boxes_size; 
 
  /* Callback boxes which don't correspond to any open file. */ 
  CVAR struct fd_callback_box **inactive_boxes, **free_inactive_box; 
  CVAR int inactive_boxes_size; 
 
  /* 
   * Hooks to inheriting classes. 
   */ 
#ifdef PIKE_DEBUG 
  typedef void debug_handler_fn (struct Backend_struct *me, void *data); 
  CVAR debug_handler_fn *debug_handler; 
#endif /* PIKE_DEBUG */ 
  typedef void update_fd_set_handler_fn (struct Backend_struct *me, void *data, 
                                         int fd, 
                                         int old_events, int new_events, 
                                         int flags); 
  CVAR update_fd_set_handler_fn *update_fd_set_handler; 
  CVAR void *handler_data; 
 
  /* 
   * CALL OUT variables 
   */ 
  CVAR int num_pending_calls;               /* no of busy pointers in heap */ 
  CVAR struct Backend_CallOut_struct **call_heap;   /* pointer to heap */ 
  CVAR int call_heap_size;                  /* no of pointers in heap */ 
 
  CVAR unsigned int hash_size; 
  CVAR unsigned int hash_order; 
  CVAR struct hash_ent *call_hash; 
 
  /* Should really exist only in PIKE_DEBUG, but 
   * #ifdefs on the last cvar confuses precompile.pike. 
   *    /grubba 2001-03-12 
   * Should be fixed now -Hubbe 
   */ 
#ifdef PIKE_DEBUG 
  CVAR int inside_call_out; 
#endif 
 
  /* The object we're in. This ref isn't refcounted. */ 
  CVAR struct object *backend_obj; 
 
#ifdef _REENTRANT 
  /* Currently only used for poll devices. */ 
  CVAR int set_busy; 
  CVAR COND_T set_change; 
#endif 
 
  DECLARE_STORAGE 
 
  /** 
   * Get the backend object containing the provided Backend_struct. 
   */ 
  PMOD_EXPORT struct object *get_backend_obj (struct Backend_struct *b)
  {
    /* backend_obj is stored without an extra refcount (see the CVAR
     * declaration); the caller must not assume ownership. */
    struct object *obj = b->backend_obj;
    return obj;
  }
 
  static int wakeup_callback(struct fd_callback_box *box, int UNUSED(event))
  {
    /* Drain any bytes queued on the wakeup pipe so that it can signal
     * again; reads interrupted by signals are retried. */
    char drain[1024];
    for (;;) {
      if (fd_read(box->fd, drain, sizeof(drain)) >= 0)
        break;            /* Pipe drained. */
      if (errno != EINTR)
        break;            /* Real error; give up on draining. */
    }
#ifdef _REENTRANT
    /* Wait out any concurrent fd-set change (used by poll device
     * backends). */
    while (box->backend->set_busy) {
      co_wait_interpreter(&box->backend->set_change);
    }
#endif /* _REENTRANT */
    return 0;
  }
 
  /** 
   * Wake up the provided backend. 
   * 
   * This is used by threaded programs and signals to wake up the 
   * master 'thread'. 
   * 
   * It's called from the signal handler so it must not lock any mutex 
   * whatsoever. E.g. dmalloc stuff is verboten here. 
   */ 
  PMOD_EXPORT void backend_wake_up_backend(struct Backend_struct *me)
  {
    /* A single byte written to the wakeup pipe is enough; the payload
     * value is irrelevant (wakeup_callback just drains the pipe). */
    char foo=0;

    /* Only write when the backend may actually be blocked waiting
     * (may_need_wakeup) and the pipe exists. */
    if(me && me->may_need_wakeup && (me->wakeup_pipe_send_fd >= 0)) {
      /* Avoid fd_write with its dmalloc stuff. */
      int len;
      do {
        len =
#ifdef HAVE_WINSOCK_H
          debug_fd_write
#else
          /* Raw write(2): this path must stay async-signal-safe (see
           * the function's header comment above). */
          write
#endif
          (me->wakeup_pipe_send_fd, &foo ,1);
      } while ((len < 0) && (errno == EINTR)); /* Retry interrupted writes. */
    }
  }
 
  /** 
   * Lower the timeout 
   * 
   * Typically used from backend callbacks. 
   */ 
  PMOD_EXPORT void backend_lower_timeout(struct Backend_struct *me,
                                         struct timeval *tv)
  {
    /* The next wakeup deadline may only ever move closer, never
     * further away. */
    if (!my_timercmp(tv, <=, &me->next_timeout))
      return;
    me->next_timeout = *tv;
  }
 
  /* 
   * Backend callbacks. 
   */ 
 
  PMOD_EXPORT struct callback *backend_debug_add_backend_callback(
    struct Backend_struct *me, callback_func call, void *arg,
    callback_func free_func)
  {
    /* Register a callback to be invoked on every backend iteration;
     * returns the handle needed to remove it again. */
    struct callback_list *list = &me->backend_callbacks;
    return add_to_callback(list, call, arg, free_func);
  }
 
  /* Invoke a monitor callback (before_callback/after_callback) with
   * the backend object as its single argument.  safe_apply_svalue is
   * used so an error thrown by the callback is reported instead of
   * interrupting the backend (see the before_callback docs above). */
  void call_backend_monitor_cb (struct Backend_struct *me, struct svalue *cb)
  {
    ref_push_object (me->backend_obj);
    safe_apply_svalue (cb, 1, 1);
    pop_stack(); /* Discard the callback's return value. */
  }
 
  /* 
   * Call outs. 
   */ 
 
  /*! @class CallOut 
   *! 
   *! Represents a single call_out in the call_out list. 
   *! 
   *! @seealso 
   *!   @[call_out()] 
   */ 
  PIKECLASS CallOut 
    program_flags PROGRAM_USES_PARENT|PROGRAM_NEEDS_PARENT; 
    flags ID_PROTECTED|ID_PRIVATE|ID_USED; 
  { 
    CVAR INT32 pos; 
    CVAR size_t fun_hval; 
    CVAR struct timeval tv; 
    CVAR struct Backend_CallOut_struct *next_fun; 
    CVAR struct Backend_CallOut_struct **prev_fun; 
    CVAR struct Backend_CallOut_struct *next_arr; 
    CVAR struct Backend_CallOut_struct **prev_arr; 
    /*! @decl protected array args 
     *! 
     *! The array containing the function and arguments. 
     */ 
    PIKEVAR array args 
      flags ID_PROTECTED; 
    CVAR struct object *this; 
 
    DECLARE_STORAGE; 
 
#undef CAR 
#undef CDR 
 
#define CAR(X) (((X)<<1)+1) 
#define CDR(X) (((X)<<1)+2) 
#define PARENT(X) (((X)-1)>>1) 
#define CALL_(X) (me->call_heap[(X)]) 
#define CALL(X) ((struct Backend_CallOut_struct *)debug_malloc_pass(CALL_(X))) 
#define MOVECALL(X,Y) do { INT32 p_=(X); (CALL_(p_)=CALL(Y))->pos=p_; }while(0) 
#define CMP(X,Y) my_timercmp(& CALL(X)->tv, <, & CALL(Y)->tv) 
#define SWAP(X,Y) do {                             \ 
      struct Backend_CallOut_struct *_tmp=CALL(X); \ 
      (CALL_(X)=CALL(Y))->pos=(X);                 \ 
      (CALL_(Y)=_tmp)->pos=(Y);                    \ 
    } while(0) 
 
#ifdef PIKE_DEBUG 
  /* ONERROR handler: clears the call out recursion guard when an
   * error is thrown inside a PROTECT_CALL_OUTS()/UNPROTECT_CALL_OUTS()
   * region, so the next entry doesn't trip the recursion check. */
  static void do_unprotect_call_outs(struct Backend_struct *me)
  {
    me->inside_call_out = 0;
  }
 
#define DECLARE_PROTECT_CALL_OUTS       ONERROR pco_err 
#define PROTECT_CALL_OUTS()                                     \ 
  do {                                                          \ 
    if(me->inside_call_out)                                     \ 
      Pike_fatal("Recursive call in call_out module.\n");       \ 
    SET_ONERROR(pco_err, do_unprotect_call_outs, me);           \ 
    me->inside_call_out=1;                                      \ 
  } while(0) 
 
#define UNPROTECT_CALL_OUTS()                   \ 
  CALL_AND_UNSET_ONERROR(pco_err) 
 
#else /* !PIKE_DEBUG */ 
#define DECLARE_PROTECT_CALL_OUTS 
#define PROTECT_CALL_OUTS() 
#define UNPROTECT_CALL_OUTS() 
#endif /* PIKE_DEBUG */ 
 
#ifdef PIKE_DEBUG 
 
 /* Debug-only consistency check of the call out data structures: the
  * heap ordering, each entry's argument array, the pos back-pointers,
  * and the two intrusive hash chains (by array and by function).
  * The amount of checking scales with d_flag; any violation is fatal. */
 static void backend_verify_call_outs(struct Backend_struct *me)
   {
     struct array *v;
     int e,d;

     if(!d_flag) return;
     if(!me->call_heap) return;

     if(me->num_pending_calls<0 || me->num_pending_calls>me->call_heap_size)
       Pike_fatal("Error in call out tables.\n");

     if(d_flag<2) return;

     /* Walk every live heap entry. */
     for(e=0;e<me->num_pending_calls;e++)
     {
       if(e)
       {
         /* Min-heap property: no entry may be due before its parent. */
         if(CMP(e, PARENT(e)))
           Pike_fatal("Error in call out heap. (@ %d)\n",e);
       }

       if(!(v=CALL(e)->args))
         Pike_fatal("No arguments to call.\n");

       if(v->refs < 1)
         Pike_fatal("Array should have at least one reference.\n");

       if(v->malloced_size<v->size)
         Pike_fatal("Impossible array.\n");

       if(!v->size)
         Pike_fatal("Call out array of zero size!\n");

       /* The prev_* pointers must point back at the entry itself
        * (intrusive doubly linked hash chains). */
       if(CALL(e)->prev_arr[0] != CALL(e))
         Pike_fatal("call_out[%d]->prev_arr[0] is wrong!\n",e);

       if(CALL(e)->prev_fun[0] != CALL(e))
         Pike_fatal("call_out[%d]->prev_fun[0] is wrong!\n",e);

       if(CALL(e)->pos != e)
         Pike_fatal("Call_out->pos is not correct!\n");

       if(d_flag>4)
       {
         /* Expensive O(n^2) duplicate scan at the highest debug level. */
         for(d=e+1;d<me->num_pending_calls;d++)
           if(CALL(e)->args == CALL(d)->args)
             Pike_fatal("Duplicate call out in heap.\n");
       }
     }

     /* Spot-check (up to 10 slots) that the free part of the heap
      * really is empty. */
     for(d=0;d<10 && e<me->call_heap_size;d++,e++) {
       if (CALL(e)) Pike_fatal("Call out left in heap.\n");
     }

     /* Verify both hash chains in every bucket. */
     for(e=0;e<(int)me->hash_size;e++)
     {
       struct Backend_CallOut_struct *c,**prev;
       for(prev=& me->call_hash[e].arr;(c=*prev);prev=& c->next_arr)
       {
         if(c->prev_arr != prev)
           Pike_fatal("c->prev_arr is wrong %p.\n",c);

         if(c->pos<0)
           Pike_fatal("Free call_out in call_out hash table %p.\n",c);
       }

       for(prev=& me->call_hash[e].fun;(c=*prev);prev=& c->next_fun)
       {
         if(c->prev_fun != prev)
           Pike_fatal("c->prev_fun is wrong %p.\n",c);

         if(c->pos<0)
           Pike_fatal("Free call_out in call_out hash table %p.\n",c);
       }
     }
   }
 
 
#else 
#define backend_verify_call_outs(X) 
#endif 
 
 
 static void adjust_down(struct Backend_struct *me,int pos)
   {
     /* Sift the heap entry at pos downwards until the call out heap
      * order (as defined by the CMP() macro) holds below it again.
      */
     for (;;)
     {
       int child = CAR(pos);
       int sibling = CDR(pos);

       /* No children below pos: heap property trivially holds. */
       if (child >= me->num_pending_calls) break;

       /* Compare against the "earlier" (per CMP()) of the two children. */
       if (sibling < me->num_pending_calls && CMP(sibling, child))
         child = sibling;

       /* pos already orders before its earliest child: done. */
       if (CMP(pos, child)) break;

       SWAP(pos, child);
       pos = child;
     }
   }
 
 static int adjust_up(struct Backend_struct *me,int pos)
   {
     /* Sift the heap entry at pos upwards as long as it orders before
      * its parent (per CMP()).  Returns 1 if the entry moved, 0 if it
      * was already in place.  After a chain of swaps the sibling of the
      * final resting place of the displaced entry may also be out of
      * order, so it gets a corrective adjust_down() pass.
      */
     int parent=PARENT(pos);
     int from;
#ifdef PIKE_DEBUG
     if(pos <0 || pos>=me->num_pending_calls)
       Pike_fatal("Bad argument to adjust_up(%d)\n",pos);
#endif
     /* The root has no parent; it can never move up. */
     if(!pos) return 0;

     if(CMP(pos, parent))
     {
       SWAP(pos, parent);
       from=pos;
       pos=parent;
       /* Keep swapping towards the root while the entry still orders
        * before its parent.  'from' tracks the last slot we swapped
        * out of.
        */
       while(pos && CMP(pos, PARENT(pos)))
       {
         parent=PARENT(pos);
         SWAP(pos, parent);
         from=pos;
         pos=parent;
       }
       /* Switch 'from' to its sibling (odd index <-> even index); the
        * element that was swapped down into that subtree may violate
        * the heap order against the sibling, so re-check and fix.
        */
       from+=from&1 ? 1 : -1;
       if(from < me->num_pending_calls && CMP(from, pos))
       {
         SWAP(from, pos);
         adjust_down(me,from);
       }
       return 1;
     }
     return 0;
   }
 
 static void adjust(struct Backend_struct *me,int pos)
   {
     /* Restore the heap invariant for the entry at pos after its key
      * may have changed: first try to float it upwards; only when it
      * did not move can it possibly need to sink downwards instead.
      */
     if (adjust_up(me, pos))
       return;
     adjust_down(me, pos);
   }
 
    INIT
    {
      /* pos == -1 marks the CallOut as not (yet) linked into the heap. */
      THIS->pos = -1;
      /* Unrefcounted backpointer to our own object; create() adds a
       * real reference when the call out is linked into the heap.
       */
      THIS->this = Pike_fp->current_object;
    }
 
    EXIT
    {
      struct Backend_CallOut_struct *this = THIS;

      /* If the call out is still scheduled (pos >= 0), unlink it from
       * the parent backend's heap before the object goes away.
       */
      if (this->pos >= 0) {
        /* Still active in the heap. DO_PIKE_CLEANUP? */
        struct Backend_struct *me = parent_storage(1, Backend_program);
        int e = this->pos;

        /* Standard heap removal: move the last entry into our slot and
         * re-establish the heap order from there.
         */
        me->num_pending_calls--;
        if (e != me->num_pending_calls) {
          MOVECALL(e, me->num_pending_calls);
          adjust(me, e);
        }
        CALL_(me->num_pending_calls) = NULL;
        this->pos = -1;
        /* Drop the self-reference that create() added when scheduling. */
        free_object(this->this);
        this->this = NULL;
      }
      EXIT_BLOCK(this);
    }
 
    /*! @decl void create(int|float seconds, mixed fun, mixed ... args) 
     *! 
     *!   Start a new call out. 
     *! 
     *!   This is the low-level implementation of @[call_out()]. 
     *! 
     *!   @[call_out()] is essentially implemented as: 
     *!   @code 
     *!     array call_out(mixed fun, int|float seconds, mixed ... args) 
     *!     { 
     *!       return CallOut(seconds, fun, @@args)->args; 
     *!     } 
     *!   @endcode 
     *! 
     *! @seealso 
     *!   @[call_out()] 
     */ 
    PIKEFUN void create(int|float seconds, mixed fun, mixed ... extra_args)
      flags ID_PROTECTED;
    {
      struct array *callable;
      size_t fun_hval;
      size_t hval;
      struct Backend_struct *me = parent_storage(1, Backend_program);
      struct Backend_CallOut_struct *new = THIS;
      DECLARE_PROTECT_CALL_OUTS;

      /* Collect fun + extra_args into the call out id array; the stack
       * now holds just seconds and callable, hence args = 2.
       */
      push_array(callable = aggregate_array(args - 1));
      args = 2;

      /* NOTE: hash_svalue() can run Pike code! */
      fun_hval = hash_svalue(ITEM(callable));

      PROTECT_CALL_OUTS();
      /* Grow (or first-time allocate) the heap and hash table when the
       * heap is full.
       */
      if(me->num_pending_calls == me->call_heap_size)
      {
        /* here we need to allocate space for more pointers */
        struct Backend_CallOut_struct **new_heap;

        if(!me->call_heap || !me->call_hash)
        {
          if (!me->call_heap) {
            me->call_heap_size = 128;
            me->call_heap = xcalloc(sizeof(struct Backend_CallOut_struct *),
                                    me->call_heap_size);
            me->num_pending_calls = 0;
          }

          if (!me->call_hash) {
            me->hash_size = hashprimes[me->hash_order];
            me->call_hash = xcalloc(sizeof(struct hash_ent), me->hash_size);
          }
        }else{
          struct hash_ent *new_hash;
          int e;

          /* Double the heap; the new upper half must be zeroed since
           * code elsewhere relies on unused slots being NULL.
           */
          new_heap = xrealloc(me->call_heap,
            sizeof(struct Backend_CallOut_struct *)*me->call_heap_size*2);
          memset(new_heap + me->call_heap_size, 0,
                 sizeof(struct Backend_CallOut_struct *)*me->call_heap_size);
          me->call_heap_size *= 2;
          me->call_heap = new_heap;

          /* Growing the hash table is optional: plain calloc() here, so
           * on failure we simply keep the old (smaller) table.
           */
          if((new_hash=calloc(sizeof(struct hash_ent),
                              hashprimes[me->hash_order+1])))
          {
            free(me->call_hash);
            me->call_hash = new_hash;
            me->hash_size = hashprimes[++me->hash_order];

            /* Re-hash */
            for(e=0;e<me->num_pending_calls;e++)
            {
              struct Backend_CallOut_struct *c = CALL(e);
              hval = PTR_TO_INT(c->args);

/* Link call out c into the head of the call_hash[hval % hash_size].X
 * doubly-linked chain (X is "arr" or "fun").  Reads and clobbers the
 * local variable hval.
 */
#define LINK(X,c)                                                       \
              hval %= me->hash_size;                                    \
              if((c->PIKE_CONCAT(next_,X) = me->call_hash[hval].X))     \
                c->PIKE_CONCAT(next_,X)->PIKE_CONCAT(prev_,X) =         \
                  &c->PIKE_CONCAT(next_,X);                             \
              c->PIKE_CONCAT(prev_,X) = &me->call_hash[hval].X;         \
              me->call_hash[hval].X = c

              LINK(arr,c);
              hval = c->fun_hval;
              LINK(fun,c);
            }
          }
        }
      }

#ifdef PIKE_DEBUG
      if (CALL(me->num_pending_calls)) {
        Pike_fatal("Lost call out in heap.\n");
      }
#endif /* PIKE_DEBUG */

      /* Append the new call out at the end of the heap and give the
       * heap its own reference to this object.
       */
      CALL_(me->num_pending_calls) = new;
      new->pos = me->num_pending_calls++;
      add_ref(Pike_fp->current_object);

      /* Hook into both hash chains: by args-array pointer and by the
       * hash of the function being called.
       */
      {
        hval = PTR_TO_INT(callable);
        LINK(arr,new);
        hval = new->fun_hval = fun_hval;
        LINK(fun,new);
      }

      /* Convert the timeout argument into a relative timeval. */
      switch(TYPEOF(*seconds))
      {
      case T_INT:
        new->tv.tv_sec = seconds->u.integer;
        new->tv.tv_usec = 0;
        break;

      case T_FLOAT:
        {
          FLOAT_TYPE tmp = seconds->u.float_number;
          new->tv.tv_sec = (long)floor(tmp);
          new->tv.tv_usec = (long)(1000000.0 * (tmp - floor(tmp)));
          break;
        }

      default:
        Pike_fatal("Bad timeout to new_call_out!\n");
      }

#ifdef BACKEND_USES_CFRUNLOOP
      check_set_timer(new->tv);
#endif
      /* Make new->tv absolute by adding the current time. */
#ifdef _REENTRANT
      if(num_threads>1)
      {
        struct timeval tmp;
        ACCURATE_GETTIMEOFDAY(&tmp);
        my_add_timeval(& new->tv, &tmp);
        COWERR("BACKEND[%d]: Adding call out at %ld.%ld "
               "(current time is %ld.%ld)\n", me->id,
               new->tv.tv_sec, new->tv.tv_usec,
               tmp.tv_sec, tmp.tv_usec);
      } else
#endif
      {
        struct timeval tmp;
        INACCURATE_GETTIMEOFDAY(&tmp);
        my_add_timeval(& new->tv, &tmp);
        COWERR("BACKEND[%d]: Adding call out at %ld.%ld "
               "(current_time is %ld.%ld)\n", me->id,
               new->tv.tv_sec, new->tv.tv_usec,
               tmp.tv_sec, tmp.tv_usec);
      }

      /* The callable array's stack reference is transferred to
       * new->args, so the two stack slots (seconds, callable) are
       * dropped without freeing.
       */
      new->args = callable;
      Pike_sp -= 2;
      dmalloc_touch_svalue(Pike_sp);

      /* Move the new entry from the end of the heap to its proper
       * place according to its timeout.
       */
      adjust_up(me, me->num_pending_calls-1);
      backend_verify_call_outs(me);

#ifdef _REENTRANT
      backend_wake_up_backend(me);
#endif

      UNPROTECT_CALL_OUTS();
    }
  } 
  /*! @endclass 
   */ 
 
#undef THIS 
#define THIS THIS_BACKEND 
 
  /* Push two key/value pairs onto the Pike stack describing this
   * backend's call out usage: "num_call_outs" (pending call outs) and
   * "call_out_bytes" (approximate memory used).  The caller (e.g.
   * get_stats()) is expected to aggregate them into a mapping.
   */
  static void backend_count_memory_in_call_outs(struct Backend_struct *me)
  {
    push_static_text("num_call_outs");
    push_int(me->num_pending_calls);

    push_static_text("call_out_bytes");
    /* The heap is an array of call_heap_size pointers (element type is
     * struct Backend_CallOut_struct *, not **, as previously written —
     * same size, but the intent should match the data structure), plus
     * one struct per pending call out.
     */
    push_int64(me->call_heap_size * sizeof(struct Backend_CallOut_struct *)+
               me->num_pending_calls * sizeof(struct Backend_CallOut_struct));

  }
 
  /* Memory-usage callback wrapper: reports the call out statistics of
   * the default backend.  The callback arguments are unused.
   */
  static void count_memory_in_call_outs(struct callback *UNUSED(foo),
                                        void *UNUSED(bar),
                                        void *UNUSED(gazonk))
  {
    backend_count_memory_in_call_outs(default_backend);
  }
 
  /*! @decl mapping(string:int) get_stats() 
   *! 
   *! Get some statistics about the backend. 
   *! 
   *! @returns 
   *!   Returns a mapping with the follwoing content: 
   *!   @mapping 
   *!     @member int "num_call_outs" 
   *!       The number of active call-outs. 
   *!     @member int "call_out_bytes" 
   *!       The amount of memory used by the call-outs. 
   *!   @endmapping 
   */ 
  PIKEFUN mapping(string:int) get_stats()
  {
    /* Build the stats mapping from the key/value pairs pushed by
     * backend_count_memory_in_call_outs(), then drop the arguments
     * while keeping the mapping on top of the stack.
     */
    struct svalue *save_sp = Pike_sp;
    backend_count_memory_in_call_outs(THIS);
    f_aggregate_mapping(Pike_sp - save_sp);
    stack_pop_n_elems_keep_top(args);
  }
 
   /* FIXME: Disabled gc MARK hook for call out args — confirm whether
    * CALL(e)->args needs explicit gc marking here, or whether the
    * references held by the CallOut objects themselves suffice. */
#if 0 
   MARK 
     { 
       int e; 
       struct Backend_struct *me=THIS; 
 
       for(e=0;e<me->num_pending_calls;e++) 
       { 
         gc_mark(CALL(e)->args,0,"call out args"); 
       } 
     } 
#endif 
 
/*! @decl array call_out(function f, float|int delay, mixed ... args) 
 *! 
 *! Make a delayed call to a function. 
 *! 
 *! @[call_out()] places a call to the function @[f] with the argument 
 *! @[args] in a queue to be called in about @[delay] seconds. 
 *! 
 *! If @[f] returns @expr{-1@}, no other call out or callback will be 
 *! called by the backend in this round. I.e. @[`()] will return right 
 *! away. For the main backend that means it will immediately start 
 *! another round and check files and call outs anew. 
 *! 
 *! @returns 
 *!   Returns a call_out identifier that identifies this call_out. 
 *!   This value can be sent to eg @[find_call_out()] or @[remove_call_out()]. 
 *! 
 *! @seealso 
 *!   @[remove_call_out()], @[find_call_out()], @[call_out_info()], 
 *!   @[CallOut] 
 */ 
   PIKEFUN array call_out(mixed f, int|float t, mixed ... rest)
     {
       struct svalue tmp;
       struct object *co;
       struct Backend_CallOut_struct *c;

       if(args<2)
         SIMPLE_WRONG_NUM_ARGS_ERROR("call_out", 2);

       if(TYPEOF(*t) != T_INT && TYPEOF(*t) != T_FLOAT)
         SIMPLE_ARG_TYPE_ERROR("call_out", 2, "int|float");

       /* Swap, for compatibility */
       /* call_out() takes (fun, delay, ...) but CallOut's create()
        * takes (delay, fun, ...), so swap the two bottom arguments
        * in place on the stack.
        */
       tmp = Pike_sp[-args];
       Pike_sp[-args] = Pike_sp[1-args];
       Pike_sp[1-args] = tmp;

       /* Instantiate the CallOut; its object replaces the arguments
        * on the stack.
        */
       apply_current(Backend_CallOut_program_fun_num, args);
       args = 1;

       /* NOTE(review): the reported function name here is
        * "low_call_out", not "call_out" — presumably a leftover from
        * an earlier name; confirm before changing the message.
        */
       get_all_args("low_call_out", args, "%o", &co);

       c = get_storage(co, Backend_CallOut_program);

       if (!c) Pike_error("Unexpected object from CallOut.\n");

       /* The call out id visible to Pike code is the args array. */
       ref_push_array(c->args);

       stack_pop_n_elems_keep_top(args);
     }
 
   /* Assumes current_time is correct on entry. */ 
   /* Run every call out that was due at the time of entry.  Returns the
    * number of call outs invoked; the count is negated when a call out
    * returned -1 (which aborts the round early).  Stops after roughly
    * one second of wall-clock work even if more call outs are due.
    */
   static int backend_do_call_outs(struct Backend_struct *me)
     {
       int call_count = 0;
       int args;
       struct timeval tmp, now;
       backend_verify_call_outs(me);

       /* 'now' is sampled once; only call outs due at entry are run.
        * 'tmp' = now + 1s is the wall-clock budget for this round.
        */
       INACCURATE_GETTIMEOFDAY(&now);
       tmp.tv_sec = now.tv_sec;
       tmp.tv_usec = now.tv_usec;
       tmp.tv_sec++;
       while(me->num_pending_calls &&
             my_timercmp(&CALL(0)->tv, <= ,&now))
       {
         /* NB: this inner 'now' shadows the outer one, so the loop
          * condition above keeps comparing against the entry time.
          */
         struct timeval now;
         /* unlink call out */
         struct Backend_CallOut_struct *cc;
         DECLARE_PROTECT_CALL_OUTS;

         /* Pop the heap root: move the last entry into slot 0 and
          * sift it down.
          */
         PROTECT_CALL_OUTS();
         cc=CALL(0);
         if(--me->num_pending_calls)
         {
           MOVECALL(0,me->num_pending_calls);
           adjust_down(me, 0);
         }
         CALL_(me->num_pending_calls) = NULL;
         UNPROTECT_CALL_OUTS();
         cc->pos = -1;

         /* Push fun + arguments from the id array onto the stack. */
         args = cc->args->size;
         if (cc->args->refs == 1) {
           push_array_items(cc->args);
         } else {
           /* Somebody else also has references to the call_out id array.
            *
            * We need to clear it to reduce garbage.
            */
           add_ref(cc->args);
           push_array_items(cc->args);
           clear_array(cc->args);
           free_array(cc->args);
         }
         cc->args = NULL;
         free_object(cc->this);

         /* A destructed function becomes T_INT zero and is skipped. */
         check_destructed(Pike_sp - args);
         if(TYPEOF(Pike_sp[-args]) != T_INT)
         {
           COWERR("[%d]BACKEND[%d]: backend_do_call_outs: "
                  "calling call out ", THR_NO, me->id);
           IF_CO(
             print_svalue (stderr, Pike_sp - args);
             fputc ('\n', stderr);
           );
           call_count++;
           f_call_function(args);
           /* A call out returning -1 aborts this round; signal that
            * to the caller by negating the call count.
            */
           if (TYPEOF(Pike_sp[-1]) == T_INT && Pike_sp[-1].u.integer == -1) {
             pop_stack();
             backend_verify_call_outs(me);
             call_count = -call_count;
             break;
           }
           else
             pop_stack();
         }else{
           COWERR("[%d]BACKEND[%d]: backend_do_call_outs: "
                  "ignoring destructed call out\n", THR_NO, me->id);
           pop_n_elems(args);
         }
         backend_verify_call_outs(me);

         /* Enforce the one-second budget (uses the inner 'now'). */
         ACCURATE_GETTIMEOFDAY(&now);
         if(my_timercmp(&now, > , &tmp)) break;
       }

       IF_CO (
         if (me->num_pending_calls)
           fprintf (stderr,
                    "BACKEND[%d]: backend_do_call_outs: stopping with %d "
                    "call outs left, closest with time %ld.%ld "
                    "(current_time %ld.%ld, limit at %ld.%ld)\n",
                    me->id, me->num_pending_calls,
                    CALL(0)->tv.tv_sec, CALL(0)->tv.tv_usec,
                    now.tv_sec, now.tv_usec,
                    tmp.tv_sec, tmp.tv_usec);
         else
           fprintf (stderr, "BACKEND[%d]: backend_do_call_outs: "
                    "no outstanding call outs\n",
                    me->id);
       );

       return call_count;
     }
 
   /* NB: Calls Pike code, so MUST NOT be used in a PROTECT_CALL_OUTS() 
    *     context. 
    * 
    *     Note that a non-NULL return value only indicates that the 
    *     fun was found before the calls of is_eq(), which may have 
    *     caused the call_out to be removed. 
    */ 
   /* Look up a call out either by its id array (T_ARRAY fun: exact
    * pointer match in the "arr" hash chain) or by the function it will
    * call (hash match in the "fun" chain, confirmed with is_eq()).
    * Returns the call out id array with an added reference, or NULL.
    */
   static struct array *backend_find_call_out_info(struct Backend_struct *me,
                                                   struct svalue *fun)
     {
       size_t hval, fun_hval;
       struct Backend_CallOut_struct *c;
       struct svalue *save_sp = Pike_sp;
       DECLARE_PROTECT_CALL_OUTS;

       if(!me->num_pending_calls) return NULL;

       PROTECT_CALL_OUTS();

       /* Fast path: fun may itself be a call out id array. */
       if(TYPEOF(*fun) == T_ARRAY)
       {
         hval=PTR_TO_INT(fun->u.array);
         hval%=me->hash_size;
         for(c=me->call_hash[hval].arr;c;c=c->next_arr)
         {
           if(c->args == fun->u.array)
           {
#ifdef PIKE_DEBUG
             if(CALL(c->pos) != c)
               Pike_fatal("Call_out->pos not correct!\n");
#endif
             UNPROTECT_CALL_OUTS();
             add_ref(c->args);
             return c->args;
           }
         }
       }

       /* Collect every hash-matching candidate on the stack first;
        * is_eq() may run Pike code and must not happen mid-scan.
        */
       fun_hval=hash_svalue(fun);
       hval = fun_hval % me->hash_size;
       for(c=me->call_hash[hval].fun;c;c=c->next_fun)
       {
         if(c->fun_hval == fun_hval)
         {
#ifdef PIKE_DEBUG
           if(CALL(c->pos) != c)
             Pike_fatal("Call_out->pos not correct!\n");
#endif
           /* Delay the is_eq() call until we've finished
            * scanning the hash table.
            */
           ref_push_array(c->args);
         }
       }
       UNPROTECT_CALL_OUTS();

       /* Note: is_eq() may call Pike code (which we want),
        *       however, we can't let it modify the hash
        *       table while we're scanning it.
        */
       while (Pike_sp > save_sp) {
         struct array *res = Pike_sp[-1].u.array;
         /* FIXME: Use CYCLIC! */
         if (is_eq(fun, ITEM(res))) {
           add_ref(res);
           pop_n_elems(Pike_sp - save_sp);
           return res;
         }
         pop_stack();
       }

       return NULL;
     }
 
   /* Typically used in a PROTECT_CALL_OUTS() context. */ 
   /* Map a call out id array back to its current heap position, or -1
    * when the call out is no longer pending.  Pure hash lookup — runs
    * no Pike code, so it is safe inside PROTECT_CALL_OUTS().
    */
   static int backend_find_call_out(struct Backend_struct *me,
                                    struct array *co_info)
     {
       size_t hval;
       struct Backend_CallOut_struct *c;

       if(!co_info || !me->num_pending_calls) return -1;

       /* The "arr" chain is keyed on the id array's address. */
       hval=PTR_TO_INT(co_info);
       hval%=me->hash_size;
       for(c=me->call_hash[hval].arr;c;c=c->next_arr)
       {
         if(c->args == co_info)
         {
#ifdef PIKE_DEBUG
           if(CALL(c->pos) != c)
             Pike_fatal("Call_out->pos not correct!\n");
#endif
           return c->pos;
         }
       }

       return -1;
     }
 
/*! @decl int _do_call_outs() 
 *! 
 *! Do all pending call_outs. 
 *! 
 *! This function runs all pending call_outs that should have been 
 *! run if Pike returned to the backend.  It should not be used in 
 *! normal operation. 
 *! 
 *! As a side-effect, this function sets the value returned by 
 *! @[time(1)] to the current time. 
 *! 
 *! @returns 
 *! Zero if no call outs were called, nonzero otherwise. 
 *! 
 *! @seealso 
 *! @[call_out()], @[find_call_out()], @[remove_call_out()] 
 */ 
   PIKEFUN int _do_call_outs()
     {
       /* Force a fresh time sample, then run every due call out. */
       INVALIDATE_CURRENT_TIME();
       RETURN backend_do_call_outs(THIS);
     }
 
/*! @decl int find_call_out(function f) 
 *! @decl int find_call_out(array id) 
 *! 
 *! Find a call out in the queue. 
 *! 
 *! This function searches the call out queue. If given a function as 
 *! argument, it looks for the first call out scheduled to that function. 
 *! 
 *! The argument can also be a call out id as returned by @[call_out()], in 
 *! which case that call_out will be found (Unless it has already been 
 *! called). 
 *! 
 *! @returns 
 *!   @[find_call_out()] returns the remaining time in seconds before that 
 *!   call_out will be executed. If no call_out is found, 
 *!   @[zero_type](@[find_call_out](f)) will return 1. 
 *! 
 *! @seealso 
 *!   @[call_out()], @[remove_call_out()], @[call_out_info()] 
 */ 
   PIKEFUN int find_call_out(function|mixed f)
   {
     struct Backend_struct *me=THIS;
     struct array *co_info;

     backend_verify_call_outs(me);

     /* May run Pike code (is_eq), so done outside PROTECT_CALL_OUTS. */
     co_info = backend_find_call_out_info(me, f);

     if(!co_info)
     {
       /* NB: This is a very exotic value! */
       SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
       Pike_sp++;
     } else {
       int e;
       struct timeval now;
       DECLARE_PROTECT_CALL_OUTS;
       PROTECT_CALL_OUTS();
       /* Re-resolve the heap position; the Pike code above may have
        * removed the call out in the meantime.
        */
       e = backend_find_call_out(me, co_info);
       pop_n_elems(args);
       free_array(co_info);
       if (e == -1) {
         /* NB: This is a very exotic value! */
         SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
         Pike_sp++;
       }else{
         /* Whole seconds until the call out fires. */
         INACCURATE_GETTIMEOFDAY(&now);
         push_int(CALL(e)->tv.tv_sec - now.tv_sec);
       }
       UNPROTECT_CALL_OUTS();
     }
     backend_verify_call_outs(me);
   }
 
/*! @decl int remove_call_out(function f) 
 *! @decl int remove_call_out(array id) 
 *! 
 *! Remove a call out from the call out queue. 
 *! 
 *! This function finds the first call to the function @[f] in the call_out 
 *! queue and removes it.  You can also give a call out id as argument (as 
 *! returned by @[call_out()]). 
 *! 
 *! @returns 
 *!   The remaining time in seconds left to that call out will be returned. 
 *!   If no call_out was found, @[zero_type](@[remove_call_out](@[f])) 
 *!   will return 1. 
 *! 
 *! @seealso 
 *!   @[call_out_info()], @[call_out()], @[find_call_out()] 
 */ 
   PIKEFUN int remove_call_out(function|mixed f)
   {
     struct Backend_struct *me=THIS;
     struct array *co_info;

     backend_verify_call_outs(me);

     /* May run Pike code (is_eq), so done outside PROTECT_CALL_OUTS. */
     co_info = backend_find_call_out_info(me, f);

     if(!co_info)
     {
       /* NB: This is a very exotic value! */
       SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
       Pike_sp++;
     } else {
       int e;
       DECLARE_PROTECT_CALL_OUTS;

       PROTECT_CALL_OUTS();
       backend_verify_call_outs(me);
       /* Re-resolve the heap position; the Pike code above may have
        * removed the call out in the meantime.
        */
       e = backend_find_call_out(me, co_info);
       backend_verify_call_outs(me);
       if(e!=-1)
       {
         struct Backend_CallOut_struct *c = CALL(e);
         struct timeval now;

         INACCURATE_GETTIMEOFDAY(&now);
         COWERR("BACKEND[%d]: Removing call out at %ld.%ld "
                "(current_time is %ld.%ld)\n", me->id,
                c->tv.tv_sec, c->tv.tv_usec,
                now.tv_sec, now.tv_usec);
         /* Return the remaining whole seconds. */
         pop_n_elems(args);
         push_int(c->tv.tv_sec - now.tv_sec);

         /* Heap removal: last entry into slot e, then re-heapify. */
         me->num_pending_calls--;
         if(e!=me->num_pending_calls)
         {
           MOVECALL(e,me->num_pending_calls);
           adjust(me,e);
         }
         CALL_(me->num_pending_calls) = NULL;
         c->pos = -1;

         /* Drop the heap's reference to the CallOut object. */
         free_object(c->this);
       }else{
         pop_n_elems(args);
         /* NB: This is a very exotic value! */
         SET_SVAL(*Pike_sp, T_INT, NUMBER_UNDEFINED, integer, -1);
         Pike_sp++;
       }

       /* Make sure not to keep any references in the
        * now stale co_info array. */
       clear_array(co_info);

       free_array(co_info);
       backend_verify_call_outs(me);
       UNPROTECT_CALL_OUTS();
     }
   }
 
/* return an array containing info about all call outs: 
 * ({  ({ delay, caller, function, args, ... }), ... }) 
 */ 
   struct array *backend_get_all_call_outs(struct Backend_struct *me)
     {
       int e;
       struct array *ret;
       struct timeval now;
       ONERROR err;
       DECLARE_PROTECT_CALL_OUTS;

       backend_verify_call_outs(me);
       PROTECT_CALL_OUTS();
       /* Allocate with size 0 but room for all entries; size is bumped
        * as each element is filled in, so a mid-loop error frees a
        * consistent array via the ONERROR handler.
        */
       ret=allocate_array_no_init(0, me->num_pending_calls);
       SET_ONERROR(err, do_free_array, ret);
       ret->type_field = BIT_ARRAY;
       /* 'now' is only read inside the loop, so it is only sampled
        * when there is at least one pending call out.
        */
       if(me->num_pending_calls) INACCURATE_GETTIMEOFDAY(&now);
       for(e=0;e<me->num_pending_calls;e++)
       {
         struct array *v;
         /* ({ delay, 0 (historical), fun, args... }) */
         v=allocate_array_no_init(CALL(e)->args->size+2, 0);
         ITEM(v)[0].u.integer=CALL(e)->tv.tv_sec - now.tv_sec;

         /* FIXME: ITEM(v)[1] used to be the current object
          *        from when the call_out was created, but
          *        that is always the backend since the
          *        backend.cmod rewrite.
          *        Now we just leave it zero.
          */
         v->type_field = BIT_INT;

         v->type_field |=
           assign_svalues_no_free(ITEM(v)+2,
                                  ITEM(CALL(e)->args),
                                  CALL(e)->args->size,BIT_MIXED);

         SET_SVAL(ITEM(ret)[e], T_ARRAY, 0, array, v);
         ret->size++;
       }
       UNSET_ONERROR(err);
       UNPROTECT_CALL_OUTS();
       return ret;
     }
 
/*! @decl array(array) call_out_info() 
 *! 
 *! Get info about all call_outs. 
 *! 
 *! This function returns an array with one entry for each entry in the 
 *! call out queue. The first in the queue will be at index 0. Each index 
 *! contains an array that looks like this: 
 *! @array 
 *!   @elem int time_left 
 *!     Time remaining in seconds until the call_out is to be performed. 
 *!   @elem int(0..0) zero 
 *!     Used to be the object that scheduled the call_out. 
 *!   @elem function fun 
 *!     Function to be called. 
 *!   @elem mixed ... args 
 *!     Arguments to the function. 
 *! @endarray 
 *! 
 *! @seealso 
 *!   @[call_out()], @[find_call_out()], @[remove_call_out()] 
 */ 
   PIKEFUN array(array) call_out_info()
     {
       /* Thin wrapper; see backend_get_all_call_outs() for the format. */
       RETURN backend_get_all_call_outs(THIS);
     }
 
  /* 
   * FD box handling 
   */ 
 
/* Fetch the callback box for an active (fd >= 0) descriptor.
 * No bounds checking: FD must lie within the fd_boxes window.
 */
#define GET_ACTIVE_BOX(ME, FD)                                          \
   ((ME)->fd_boxes[(FD) - (ME)->fd_boxes_start])

/* Fetch the callback box for any fd: negative fds (~pos encoding)
 * index the inactive box table.  No bounds checking.
 */
#define GET_BOX(ME, FD)                                                 \
   ((FD) < 0 ?                                                          \
    (ME)->inactive_boxes[~(FD)] :                                       \
    (ME)->fd_boxes[(FD) - (ME)->fd_boxes_start])

/* Bounds-checked variant of GET_ACTIVE_BOX; NULL when FD is outside
 * the fd_boxes window.
 */
#define SAFE_GET_ACTIVE_BOX(ME, FD)                                     \
   ((FD) >= (ME)->fd_boxes_start &&                                     \
    (FD) < (ME)->fd_boxes_start + (ME)->fd_boxes_size ?                 \
    GET_ACTIVE_BOX (ME, FD) : NULL)
 
 
   /* Look up the callback box for an fd without risking out-of-bounds
    * access; returns NULL when no box is hooked in.  Negative fds
    * (~index encoding) refer to the inactive box table, whose free
    * slots hold free-list pointers into the table itself — those must
    * not be returned as boxes.
    */
   static struct fd_callback_box *safe_get_box (struct Backend_struct *me,
                                                int fd)
   {
     if (fd >= 0) {
       int idx = fd - me->fd_boxes_start;
       if (idx >= 0 && idx < me->fd_boxes_size)
         return me->fd_boxes[idx];
     }
     else {
       int idx = ~fd;
       if (idx < me->inactive_boxes_size) {
         struct fd_callback_box *box = me->inactive_boxes[idx];
         struct fd_callback_box **p = (struct fd_callback_box **) box;
         /* Skip free-list entries (pointers into the table itself). */
         if (p < me->inactive_boxes ||
             p >= me->inactive_boxes + me->inactive_boxes_size)
           return box;
       }
     }
     return NULL;
   }
 
   /** 
    * Get the fd_callback_box corresponding to an fd. 
    */ 
   PMOD_EXPORT struct fd_callback_box
     *get_fd_callback_box_for_fd( struct Backend_struct *me, int fd )
   {
       /* Exported wrapper; returns NULL when no box is hooked in. */
       return safe_get_box( me, fd );
   }
 
/* NOTE: Some versions of AIX seem to have a 
 *         #define events reqevents 
 *       in one of the poll headerfiles. This will break 
 *       the fd_box event handling. 
 */ 
#undef events 
 
   /* Make sure the WANT_EVENT() macro is useable... */ 
#undef READ 
#undef WRITE 
#undef READ_OOB 
#undef WRITE_OOB 
#undef FS_EVENT 
#undef ERROR 
 
/* True when BOX is non-NULL and has subscribed to the named event
 * (one of READ, WRITE, READ_OOB, WRITE_OOB, FS_EVENT, ERROR).
 */
#define WANT_EVENT(BOX, WHAT)                                           \
   ((BOX) && (BOX)->events & PIKE_CONCAT (PIKE_BIT_FD_, WHAT))

/* Iterate over every hooked-in active box of ME, binding it to BOX_VAR.
 * Declares helper locals, so use inside its own block scope.
 */
#define FOR_EACH_ACTIVE_FD_BOX(ME, BOX_VAR)                             \
   struct Backend_struct *me_ = (ME);                                   \
   struct fd_callback_box *BOX_VAR, **boxes_ = me_->fd_boxes;           \
   int b_, max_ = me_->fd_boxes_size;                                   \
   for (b_ = 0; b_ < max_; b_++)                                        \
     if ((BOX_VAR = boxes_[b_]))

/* As above, but over the inactive box table; entries that are really
 * free-list pointers into the table itself are skipped.
 */
#define FOR_EACH_INACTIVE_FD_BOX(ME, BOX_VAR)                           \
   struct Backend_struct *me_ = (ME);                                   \
   struct fd_callback_box *BOX_VAR, **boxes_ = me_->inactive_boxes;     \
   int b_ = 0, max_ = me_->inactive_boxes_size;                         \
   for (b_ = 0; b_ < max_; b_++)                                        \
     if ((BOX_VAR = boxes_[b_]) &&                                      \
         /* Avoid free list pointers. */                                \
         ((struct fd_callback_box **) BOX_VAR < boxes_ ||               \
          (struct fd_callback_box **) BOX_VAR >= boxes_+max_))
 
#ifdef PIKE_DEBUG 
  /* Debug-only consistency check: verify that box is hooked into a
   * backend and registered under the expected fd.  Pass fd == INT_MAX
   * to check against the box's own fd.  NULL boxes are accepted.
   */
  static void check_box (struct fd_callback_box *box, int fd)
  {
    struct Backend_struct *me;
    if (!box) return;
    if (!(me = box->backend))
      Pike_fatal("fd_callback_box not hooked to any backend.\n");
    if (fd == INT_MAX)
      fd = box->fd;
    else if (fd != box->fd)
      Pike_fatal("fd in callback box doesn't contain the expected fd.\n");
    if (safe_get_box (me, fd) != box)
      Pike_fatal("fd_callback_box not hooked in correctly for fd %d.\n",
                 box->fd);
  }
#else 
#  define check_box(box, fd) do {} while (0) 
#endif 
 
   /* Hook a callback box into its backend.  Active boxes (box->fd >= 0)
    * go into the fd_boxes window, growing/shifting it as needed (any
    * allocation failure here is fatal).  Inactive boxes (box->fd < 0)
    * go into a free-list-managed table and get box->fd set to ~index.
    *
    * Change from previous revision: removed the unused local
    * 'old_boxes' in the grow-downwards branch; everything else is
    * unchanged.
    */
   static void add_fd_box (struct fd_callback_box *box)
   {
     struct Backend_struct *me = box->backend;
     int fd = box->fd;

#ifdef PIKE_DEBUG
     if (fd >= 0) {
       struct fd_callback_box *old_box = SAFE_GET_ACTIVE_BOX (me, fd);
       if (old_box == box)
         Pike_fatal ("The box is already hooked in.\n");
       if (old_box)
         Pike_fatal ("There's another callback box %p for fd %d.\n",
                     old_box, fd);
       if (get_backend_for_fd (fd) && get_backend_for_fd (fd) != me)
         Pike_fatal ("The fd is allocated to another backend.\n");
     }
     else {
       int i;
       for (i = 0; i < me->inactive_boxes_size; i++)
         if (me->inactive_boxes[i] == box)
           Pike_fatal ("The box is already hooked in.\n");
     }
#endif

     if (fd >= 0) {
       low_set_backend_for_fd (fd, me);

       if (!me->fd_boxes_size) {
         /* Start small since backends with only a single fd aren't uncommon. */
         me->fd_boxes_size = 4;
         me->fd_boxes = calloc (sizeof (me->fd_boxes[0]), me->fd_boxes_size);
         if (!me->fd_boxes)
           Pike_fatal ("Out of memory in backend::add_fd_box(): "
                       "Tried to allocate %d fd_callback_box pointers\n",
                       me->fd_boxes_size);
         me->fd_boxes_start = fd;
         fd = 0;
       }

       else if (fd < me->fd_boxes_start) {
         /* Grow the window downwards: shift the existing entries up by
          * 'shift' slots and zero the new low end.
          */
         int old_size = me->fd_boxes_size, shift = me->fd_boxes_size;
         PDWERR("me->fd_boxes: %p (%d ==> %d)\n",
                me->fd_boxes, me->fd_boxes_start, fd);
         while (fd < me->fd_boxes_start - shift) shift *= 2;
         if (me->fd_boxes_start - shift < 0) shift = me->fd_boxes_start;
         me->fd_boxes_start -= shift;
         me->fd_boxes_size += shift;
         debug_malloc_touch(me->fd_boxes);
         me->fd_boxes =
           realloc (me->fd_boxes, sizeof (me->fd_boxes[0]) * me->fd_boxes_size);
         if (!me->fd_boxes)
           Pike_fatal ("Out of memory in backend::add_fd_box(): "
                       "Tried to allocate %d fd_callback_box pointers\n",
                       me->fd_boxes_size);
         memmove (me->fd_boxes + shift, me->fd_boxes,
                  sizeof (me->fd_boxes[0]) * old_size);
         memset (me->fd_boxes, 0, sizeof (me->fd_boxes[0]) * shift);
         debug_malloc_touch(me->fd_boxes);
         fd -= me->fd_boxes_start;
       }

       else {
         /* Grow the window upwards (doubling) if fd falls past the end. */
         fd -= me->fd_boxes_start;
         if (fd >= me->fd_boxes_size) {
           int old_size=me->fd_boxes_size;
           while(fd >= me->fd_boxes_size) me->fd_boxes_size*=2;
           debug_malloc_touch(me->fd_boxes);
           me->fd_boxes =
             realloc(me->fd_boxes, sizeof(me->fd_boxes[0]) * me->fd_boxes_size);
           if( !me->fd_boxes )
             Pike_fatal("Out of memory in backend::add_fd_box(): "
                        "Tried to allocate %d fd_callback_box pointers\n",
                        me->fd_boxes_size);
           memset(me->fd_boxes+old_size,
                  0,
                  (me->fd_boxes_size-old_size)*sizeof(me->fd_boxes[0]));
           debug_malloc_touch(me->fd_boxes);
         }
       }

       me->fd_boxes[fd] = box;
     }

     else {                     /* Add an inactive box. */
       int pos;

       /* Refill the free list by (re)allocating the table; the new
        * slots are chained together as free-list pointers, terminated
        * by NULL.
        */
       if (!me->free_inactive_box) {
         pos = me->inactive_boxes_size;
         if (!me->inactive_boxes_size)
           me->inactive_boxes_size = 4;
         else
           me->inactive_boxes_size *= 2;
         if (!me->inactive_boxes)
           me->inactive_boxes =
             malloc (sizeof (me->inactive_boxes[0]) * me->inactive_boxes_size);
         else {
#ifdef PIKE_DEBUG
           {
             struct fd_callback_box **p, **boxes = me->inactive_boxes;
             int i;
             int max = pos; /* me->inactive_boxes_size before enlargement. */
             for (i = 0; i < max; i++)
               if ((p = (struct fd_callback_box **) boxes[i]) &&
                   p >= boxes && p < boxes + max)
                 Pike_fatal ("Still got free list pointers in inactive box "
                             "list that is about to be enlarged.\n");
           }
#endif
           me->inactive_boxes =
             realloc (me->inactive_boxes,
                      sizeof (me->inactive_boxes[0]) * me->inactive_boxes_size);
         }
         if (!me->inactive_boxes)
           Pike_fatal ("Out of memory in backend::add_fd_box(): "
                       "Tried to allocate %d inactive "
                       "fd_callback_box pointers.\n",
                       me->inactive_boxes_size);
         me->free_inactive_box = me->inactive_boxes + pos;
         while (++pos < me->inactive_boxes_size)
           me->inactive_boxes[pos - 1] =
             (struct fd_callback_box *) (me->inactive_boxes + pos);
         me->inactive_boxes[pos - 1] = NULL;
       }

       /* Pop a slot off the free list and store the box there; the
        * box's fd becomes the complemented slot index.
        */
       pos = me->free_inactive_box - me->inactive_boxes;
       me->free_inactive_box =
         (struct fd_callback_box **)*me->free_inactive_box;

       me->inactive_boxes[pos] = box;
       box->fd = ~pos;
     }
   }
 
   /* Detach a hooked box from its backend's lookup tables.
    * Active boxes (fd >= 0) are cleared from the fd_boxes table and the
    * global fd => backend map; inactive boxes (fd < 0, slot stored as
    * ~fd by add_fd_box) get their slot pushed back on the free list. */
   static void remove_fd_box (struct fd_callback_box *box)
   {
     struct Backend_struct *me = box->backend;
     int fd = box->fd;
     check_box (box, INT_MAX);

     /* FIXME: Shrink arrays? */

     if (fd < 0) {
       /* Inactive box: return its slot to the free list. */
       int pos = ~fd;
       me->inactive_boxes[pos] =
         (struct fd_callback_box *) me->free_inactive_box;
       me->free_inactive_box = me->inactive_boxes + pos;
     }
     else {
       /* Active box: drop the global fd mapping and clear our slot. */
       low_set_backend_for_fd (fd, NULL);
       me->fd_boxes[fd - me->fd_boxes_start] = NULL;
     }
   }
 
   /* Propagate a change of monitored events for fd to the low-level
    * backend implementation via me->update_fd_set_handler.
    *
    * @param me          Backend whose fd set is updated.
    * @param fd          File descriptor; negative fds are ignored.
    * @param old_events  Previously monitored event mask.
    * @param new_events  New event mask to monitor.
    * @param flags       Passed through to the handler unchanged.
    */
   static void update_fd_set (struct Backend_struct *me, int fd,
                              int old_events, int new_events, int flags)
   {
     if (fd < 0) return;

#ifdef __NT__
     /* On NT only sockets can be polled; anything else is a caller bug. */
     if (new_events &&
         !(fd_query_properties(fd, fd_CAN_NONBLOCK) & fd_CAN_NONBLOCK)) {
       Pike_fatal("update_fd_set() on non-socket!\n");
     }
#endif /* __NT__ */

     PDWERR("update_fd_set(%p, %d, 0x%08x, 0x%08x)\n",
            me, fd, old_events, new_events) ;

     if (me->update_fd_set_handler) {
       me->update_fd_set_handler(me, me->handler_data,
                                 fd, old_events, new_events, flags);
     } else {
       /* BUGFIX: the format string expects four arguments, but none were
        * passed, which is undefined behavior in the fatal path. */
       Pike_fatal("No update_fd_set_handler set[%p, %d, 0x%04x, 0x%04x].\n",
                  me, fd, old_events, new_events);
     }
   }
 
   /** 
    * Update the backend that the box is associated with to 
    * a new set of events to notify for. 
    */ 
  PMOD_EXPORT void hook_fd_callback_box (struct fd_callback_box *box)
  {
    struct Backend_struct *me = box->backend;
    int fd = box->fd;
#ifdef PIKE_DEBUG
    /* BUGFIX: this check used to come after the PDWERR below, which
     * already dereferences me->id - i.e. in debug builds (exactly where
     * the check exists) a NULL backend crashed before being diagnosed.
     * Check before any use of me. */
    if (!me) Pike_fatal ("Backend not set.\n");
#endif
    PDWERR("[%d]BACKEND[%d]: hook_fd_callback_box: "
           "fd %d, events 0x%x, object %p\n",
           THR_NO, me->id, fd, box->events, box->ref_obj);
#ifdef __NT__
    /* Only sockets can be polled on NT. */
    if ((fd >= 0) && box->events &&
        !(fd_query_properties(fd, fd_CAN_NONBLOCK) & fd_CAN_NONBLOCK)) {
      Pike_fatal("hook_fd_callback_box() on non-socket!\n"
                 "  fd: %d\n"
                 "  events: 0x%04x\n"
                 "  fd_properties: 0x%04x\n",
                 fd, box->events, fd_query_properties(fd, fd_CAN_NONBLOCK));
    }
#endif /* __NT__ */
    add_fd_box (box);
    if (fd >= 0) update_fd_set (me, fd, 0, box->events, box->flags);
    /* Hold a ref on the container object while events are monitored. */
    if (box->ref_obj && box->events) add_ref (box->ref_obj);
  }
 
  /**
   * Unlink the box from the active list it is on (if any),
   * and return 1 if it was active (and thus needs to be freed).
   */
  static int unlink_box(struct fd_callback_box *box)
  {
    struct fd_callback_box *prev;

    if (!box->next)
      return 0;                 /* Not on any active list. */

    /* The active list is linked through ->next and cyclic, so the
     * predecessor is found by walking forward until the next pointer
     * points back at box. */
    for (prev = box->next; prev->next != box; prev = prev->next)
      ;
    prev->next = box->next;
    box->next = NULL;
    return 1;
  }
 
  /**
   * Unhook a box from its associated backend (if any),
   * and clear any events.
   *
   * Safe to call on an already unhooked box (no backend set).
   */
  PMOD_EXPORT void unhook_fd_callback_box (struct fd_callback_box *box)
  {
    /* Accept an unhooked box; can happen when we're called from an
     * object exit hook due to being freed by free_object below. */
    if (!box->backend) {
      PDWERR("[%d]BACKEND[unhooked box]: unhook_fd_callback_box: "
             "fd %d, object %p\n", THR_NO, box->fd, box->ref_obj);
      return;
    }

    check_box (box, INT_MAX);
    PDWERR("[%d]BACKEND[%d]: unhook_fd_callback_box: fd %d, object %p\n",
           THR_NO, box->backend->id, box->fd, box->ref_obj);

    /* Stop monitoring the fd before detaching from the backend. */
    if (box->fd >= 0) {
      update_fd_set (box->backend, box->fd, box->events, 0, box->flags);
    }
    remove_fd_box (box);
    box->backend = NULL;
    /* Make sure no further callbacks are called on this box. */
    box->revents = 0;
    box->rflags = 0;

    /* Unlink the box from the active list in case it is there,
     * otherwise the active list may get cut, which could cause
     * the remaining boxes on the list to be lost from the backend.
     *
     * NOTE(review): this free and the one below appear to release two
     * separate refs - one held while the box is queued on the active
     * list, one held while events are set (cf hook_fd_callback_box) -
     * confirm against backend_call_active_callbacks before changing.
     */
    if (unlink_box(box) && box->ref_obj) {
      /* Use gc safe method to allow calls from within the gc. */
      /* box->ref_obj is only converted from a counted to
       * non-counted ref, so it shouldn't be clobbered by the free. */
      union anything u;
      u.object = box->ref_obj;
      gc_free_short_svalue (&u, T_OBJECT);
    }

    if (box->ref_obj && box->events) {
      /* Use gc safe method to allow calls from within the gc. */
      /* box->ref_obj is only converted from a counted to
       * non-counted ref, so it shouldn't be clobbered by the free. */
      union anything u;
      u.object = box->ref_obj;
      gc_free_short_svalue (&u, T_OBJECT);
      box->events = 0;
    }
  }
 
  /**
   * Set the event mask for a hooked box.
   *
   * Adjusts the ref on box->ref_obj: a ref is held exactly while the
   * event mask is nonzero.
   */
  PMOD_EXPORT void set_fd_callback_events (struct fd_callback_box *box,
                                           int events, int flags)
  {
    int prev_events = box->events;
    check_box (box, INT_MAX);
    PDWERR("[%d]BACKEND[%d]: set_fd_callback_events: "
           "fd %d, events from 0x%x to 0x%x, object %p\n",
           THR_NO, box->backend->id, box->fd, prev_events, events,
           box->ref_obj);
    if (box->fd >= 0)
      update_fd_set (box->backend, box->fd, prev_events, events, flags);
    box->events = events;
    box->flags = flags;

    if (!box->ref_obj) return;

    if (!prev_events) {
      /* Going from no events to some: take a ref on the container. */
      if (events) add_ref (box->ref_obj);
    }
    else if (!events) {
      /* Going from some events to none: drop the ref. */
      /* Use gc safe method to allow calls from within the gc. */
      /* box->ref_obj is only converted from a counted to
       * non-counted ref, so it shouldn't be clobbered by the free. */
      union anything u;
      u.object = box->ref_obj;
      gc_free_short_svalue (&u, T_OBJECT);
    }
  }
 
  /**
   * Change the backend that a box is to be associated with.
   *
   * The new backend must be non-NULL; a no-op if it is the same as
   * the current one.
   */
  PMOD_EXPORT void change_backend_for_box (struct fd_callback_box *box,
                                           struct Backend_struct *new)
  {
    struct Backend_struct *old = box->backend;
    if (old) check_box (box, INT_MAX);

#ifdef PIKE_DEBUG
    if (!new) Pike_fatal ("New backend is invalid.\n");
#endif

    PDWERR("[%d]BACKEND[%d]: change_backend_for_box: "
           "fd %d, new backend %d\n",
           THR_NO, old?old->id:-1, box->fd, new->id);

    if (old != new) {
      if (old) {
        /* Detach from the old backend: stop monitoring and unmap. */
        if (box->fd >= 0) {
          update_fd_set (old, box->fd, box->events, 0, box->flags);
        }
        remove_fd_box (box);

        /* Unlink the box from the active in the old backend,
         * and free it if it was there.
         */
        if (unlink_box(box) && box->ref_obj) {
          free_object(box->ref_obj);
        }
      }
      /* Attach to the new backend and re-register the events. */
      box->backend = new;
      add_fd_box (box);
      if (box->fd >= 0) {
        update_fd_set (new, box->fd, 0, box->events, box->flags);
      }
    }
  }
 
  /**
   * Change the fd that a box refers to.
   *
   * Works on both hooked and unhooked boxes; for hooked boxes the
   * event registrations are moved from the old fd to the new one.
   */
  PMOD_EXPORT void change_fd_for_box (struct fd_callback_box *box, int new_fd)
  {
    int old_fd = box->fd;

    if (!box->backend) {
      /* Convenience so that the caller doesn't have to check if the
       * box is hooked in. */
      PDWERR("[%d]BACKEND[unhooked box]: change_fd_for_box: "
             "fd from %d to %d, obj: %p\n",
             THR_NO, old_fd, new_fd, box->ref_obj);
      box->fd = new_fd;
      box->revents = 0;
      box->rflags = 0;
    }

    else {
      check_box (box, INT_MAX);

      /* Only rehook if the fd actually changes (for negative fds all
       * values are equivalent, so only a transition to >= 0 counts). */
      if (old_fd >= 0 ? old_fd != new_fd : new_fd >= 0) {
        if (old_fd >= 0) {
          update_fd_set (box->backend, old_fd, box->events, 0, box->flags);
        }
        remove_fd_box (box);
        box->fd = new_fd;
        add_fd_box (box);
        /* add_fd_box may rewrite box->fd (inactive boxes get ~pos). */
        new_fd = box->fd;
        box->revents = 0;
        box->rflags = 0;
        if (new_fd >= 0) {
          update_fd_set (box->backend, new_fd, 0, box->events, box->flags);
        }
      }

      PDWERR("[%d]BACKEND[%d]: change_fd_for_box: fd from %d to %d\n",
             THR_NO, box->backend->id, old_fd, new_fd);
    }
  }
 
  /* Error-recovery hook (ONERROR): release the box' container object
   * ref, if it holds one. */
  static void do_free_fd_box(struct fd_callback_box *box)
  {
    if (!box->ref_obj) return;
    free_object(box->ref_obj);
  }
 
  static void do_free_fd_list(struct fd_callback_box *fd_list) 
  { 
    struct fd_callback_box *box; 
    while ((box = fd_list->next)) { 
      fd_list->next = box->next; 
      box->next = NULL; 
      if (box->ref_obj) free_object(box->ref_obj); 
    } 
  } 
 
#ifdef PIKE_DEBUG 
 
  /* Debug-build consistency check: verify call outs, run the
   * implementation-specific debug handler (if any), and sanity check
   * every active and inactive fd box. */
  static void backend_do_debug(struct Backend_struct *me)
  {
    backend_verify_call_outs(me);

    if (me->debug_handler) {
      me->debug_handler(me, me->handler_data);
    }

    {FOR_EACH_ACTIVE_FD_BOX (me, box) check_box (box, INT_MAX);}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) check_box (box, INT_MAX);}
  }
 
#endif  /* PIKE_DEBUG */ 
 
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time) 
   *!   Perform one pass through the backend. 
   *! 
   *!   Calls any outstanding call-outs and non-blocking I/O 
   *!   callbacks that are registred in this backend object. 
   *! 
   *! @param sleep_time 
   *!   Wait at most @[sleep_time] seconds. The default when 
   *!   unspecified or the integer @expr{0@} is no time limit. 
   *! 
   *! @returns 
   *!   If the backend did call any callbacks or call outs then the 
   *!   time spent in the backend is returned as a float. Otherwise 
   *!   the integer @expr{0@} is returned. 
   *! 
   *! @note 
   *!   If multiple threads concurrently call this function, then: 
   *!   @ul 
   *!     @item 
   *!       One of the threads will be the controlling thread. 
   *!     @item 
   *!       All callbacks will be called from the controlling thread. 
   *!     @item 
   *!       All threads will be woken up when the controlling thread 
   *!       is done. This may be prematurely if the controlling thread 
   *!       had a shorter timeout. 
   *!   @endul 
   *! 
   *! @note 
   *!   The backend may also be woken up prematurely if the set 
   *!   of events to monitor is changed. 
   *! 
   *! @note 
  *!   Multiple concurrent calls were not supported prior to Pike 8.0.
   *! 
   *! @seealso 
   *!   @[Pike.DefaultBackend], @[main()] 
   */ 
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
    prototype;
  {
    /* Prototype only; the implementation is provided by the
     * poll/select specific subclasses. */
  }
 
#ifndef tObjImpl_THREAD_THREAD 
  /* Kludge for precompile.pike; it resolves object(Thread.Thread) 
   * to tObjImpl_THREAD_THREAD, while "program_id.h" only knows about 
   * tObjImpl_THREAD_ID. 
   */ 
#define tObjImpl_THREAD_THREAD  tObjImpl_THREAD_ID 
#endif /* !tObjImpl_THREAD_THREAD */ 
 
  /*! @decl Thread.Thread executing_thread()
   *! @decl int executing_thread()
   *!
   *! Return the thread currently executing in the backend. I.e. the
   *! thread that has called @[`()] and hasn't exited from that call.
   *! Zero is returned if there's no thread in the backend.
   *!
   *! If Pike is compiled without thread support then @expr{1@} is
   *! returned if we're inside the backend, @expr{0@} otherwise.
   */
  PIKEFUN object(Thread.Thread)|int(0..1) executing_thread()
    /* FIXME: The type is too weak, but precompile.pike doesn't
     * understand different function variants in cpp branches. */
  {
    pop_n_elems (args);
#ifdef PIKE_THREADS
    /* exec_thread is a thread_state pointer when threads are enabled. */
    if (THIS->exec_thread)
      ref_push_object (THIS->exec_thread->thread_obj);
    else
      push_int (0);
#else
    /* Without thread support exec_thread is a plain 0/1 flag. */
    push_int (THIS->exec_thread);
#endif
  }
 
  /*! @decl void add_file(Stdio.File|Stdio.FILE f)
   *!
   *! Register a file to be handled by this backend.
   *!
   *! @param f
   *!   File to register.
   *!
   *! Registers @[f] to be handled by this backend.
   *! This simply does @expr{f->set_backend(backend)@} where
   *! @expr{backend@} is this object.
   *!
   *! @seealso
   *!   @[Pike.DefaultBackend], @[main()]
   */
  PIKEFUN void add_file(object f)
  {
    /* Delegate to f->set_backend(this) and let the file object do the
     * actual (un)hooking of its boxes. */
    ref_push_object (Pike_fp->current_object);
    apply (f, "set_backend", 1);
  }
 
 
   /*! @decl int id()
    *!
    *! Return an integer that uniquely identifies this backend. For
    *! the default backend that integer is @expr{0@}.
    */
   PIKEFUN int id()
   {
     RETURN (THIS->id);
   }
 
   /* Pike lfun: format the backend as "Pike.Backend(<id>)" for %O;
    * any other conversion yields the integer 0 (meaning "no string
    * representation"). */
   PIKEFUN string _sprintf(int type, mapping flags)
   {
     if (type == 'O') {
       push_static_text ("Pike.Backend(%d)");
       push_int (THIS->id);
       f_sprintf (2);
       stack_pop_n_elems_keep_top (args);
     }
     else {
       pop_n_elems (args);
       push_int (0);
     }
   }
 
  extern int pike_make_pipe(int *); 
 
  /* GC check pass: report every object reference the backend holds -
   * call out objects plus the container objects of all fd boxes with
   * events set (those are the ones hook/set_fd_callback_events took a
   * ref on). */
  GC_CHECK
  {
    struct Backend_struct *me =
      (struct Backend_struct *) Pike_fp->current_storage;
    int e;

    for (e = 0; e < me->num_pending_calls; e++) {
      if (CALL(e)->this)
        debug_gc_check (CALL(e)->this,
                        " as call out in backend object");
    }

    {FOR_EACH_ACTIVE_FD_BOX (me, box) {
        check_box (box, INT_MAX);
        if (box->ref_obj && box->events)
          debug_gc_check (box->ref_obj, " as container object "
                          "for an active callback in backend object");
      }}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) {
        check_box (box, INT_MAX);
        if (box->ref_obj && box->events)
          debug_gc_check (box->ref_obj, " as container object "
                          "for an inactive callback in backend object");
      }}
  }
 
  /* GC recurse pass: visit the same refs as GC_CHECK above, via the
   * short-svalue helpers so the gc can follow/clear them. */
  GC_RECURSE
  {
    struct Backend_struct *me =
      (struct Backend_struct *) Pike_fp->current_storage;
    int e;

    for (e = 0; e < me->num_pending_calls; e++) {
      if (CALL(e)->this)
        gc_recurse_short_svalue ((union anything *) &CALL(e)->this, T_OBJECT);
    }

    {FOR_EACH_ACTIVE_FD_BOX (me, box) {
        if (box->ref_obj && box->events)
          gc_recurse_short_svalue ((union anything *) &box->ref_obj, T_OBJECT);
      }}
    {FOR_EACH_INACTIVE_FD_BOX (me, box) {
        if (box->ref_obj && box->events)
          gc_recurse_short_svalue ((union anything *) &box->ref_obj, T_OBJECT);
      }}
  }
 
  /* Mark the backend as having no executing thread (undoes the
   * exec_thread claim made in low_backend_once_setup). */
  static void low_backend_cleanup (struct Backend_struct *me)
  {
    me->exec_thread = 0;
  }
 
  /**
   * Generic backend setup code.
   *
   * Claims the backend for the calling thread (me->exec_thread),
   * computes the absolute timeout from *start_time and the pending
   * call outs, runs the backend callbacks, and finally converts
   * me->next_timeout into a relative timeout for the poll step.
   * On entry *start_time holds the requested sleep time (tv_sec < 0
   * means no limit); on exit it holds the current time.
   *
   * Returns 0 if the backend needs to be polled,
   *         1 if a different thread has run the backend to completion,
   *        -1 on timeout (a different thread is running the backend,
   *           but it has not completed in time).
   */
  static int low_backend_once_setup(struct Backend_struct *me,
                                    struct timeval *start_time)
  {
#ifdef PIKE_DEBUG
    struct timeval max_timeout;
#endif
    struct timeval *next_timeout = &me->next_timeout, now;

    alloca(0);                  /* Do garbage collect */
#ifdef PIKE_DEBUG
    if(d_flag > 1) do_debug();
#endif

#ifndef OWN_GETHRTIME
    ACCURATE_GETTIMEOFDAY(&now);
#else
    /* good place to run the gethrtime-conversion update
       since we have to run gettimeofday anyway /Mirar */
    INACCURATE_GETTIMEOFDAY(&now);
    own_gethrtime_update(&now);
#endif
    /* Convert the requested sleep time to an absolute deadline
     * (tv_sec < 0 is kept as the "no limit" marker). */
    if (start_time->tv_sec < 0) {
      next_timeout->tv_sec = -1;
      next_timeout->tv_usec = 0;
    }
    else {
      *next_timeout = *start_time;
      my_add_timeval(next_timeout, &now);
    }

    *start_time = now;

    if(me->exec_thread) {
#ifdef PIKE_THREADS
      if (me->exec_thread != Pike_interpreter.thread_state) {
        /* The backend is busy in some other thread.
         *
         * It will wake us up with a cond-signal when it is done.
         */
        int old_done_counter = me->done_counter;
        SWAP_OUT_CURRENT_THREAD();
        if (next_timeout->tv_sec < 0) {
          co_wait_interpreter(&me->backend_signal);
        } else {
          co_wait_interpreter_timeout(&me->backend_signal,
                                      next_timeout->tv_sec,
                                      next_timeout->tv_usec * 1000);
        }
        SWAP_IN_CURRENT_THREAD();
        /* done_counter unchanged ==> we timed out rather than got
         * signalled about a completed round. */
        if (me->done_counter == old_done_counter) {
          return -1;
        }
        return 1;
      } else
#endif
      {
        /* It's actually not a problem to make this function
         * reentrant, but that'd introduce a risk of races in the
         * callbacks (i.e. between when a read callback is called
         * and when it reads the data), and besides I can't think
         * of any sane way to use it. Also, this error can help
         * discover otherwise tricky bugs. /mast */
#ifndef BACKEND_USES_CFRUNLOOP
        Pike_error ("Backend already running - cannot reenter.\n");
#else
        ; /* Do nothing, as we can call setup more than once. */
#endif
      }
    }
    /* Claim the backend for this thread. */
#ifdef PIKE_THREADS
    me->exec_thread = Pike_interpreter.thread_state;
#else
    me->exec_thread = 1;
#endif

    /* Call outs: never sleep past the earliest pending call out. */
    if(me->num_pending_calls)
      if(next_timeout->tv_sec < 0 ||
         my_timercmp(& CALL(0)->tv, < , next_timeout))
        *next_timeout = CALL(0)->tv;

#ifdef PIKE_DEBUG
    max_timeout = *next_timeout;
#endif
    /* Backend callbacks may shorten (but must not raise) the timeout. */
    call_callback(& me->backend_callbacks, me);
#ifdef PIKE_DEBUG
    if (max_timeout.tv_sec >= 0 &&
        (next_timeout->tv_sec < 0 ||
         my_timercmp (&max_timeout, <, next_timeout)))
      Pike_fatal ("Timeout raised from %lu.%lu to %lu.%lu "
                  "by a backend callback.\n",
                  (unsigned long)max_timeout.tv_sec,
                  (unsigned long)max_timeout.tv_usec,
                  (unsigned long)next_timeout->tv_sec,
                  (unsigned long)next_timeout->tv_usec);
#endif

    /* Convert the absolute deadline back to a relative timeout. */
    if (next_timeout->tv_sec < 0) {
      /* Wait "forever". */
      next_timeout->tv_sec = 100000000;
      next_timeout->tv_usec = 0;
    }
    else if(my_timercmp(next_timeout, > , &now))
    {
      my_subtract_timeval(next_timeout, &now);
    }else{
      next_timeout->tv_usec = 0;
      next_timeout->tv_sec = 0;
    }

    return 0;
  }
 
 
  /* Call callbacks for the active events.
   *
   * Walks the singly linked list of boxes with pending revents,
   * unhooking each box before invoking its callbacks so that a
   * callback may freely rehook or free the box.
   *
   * NOTE: The first element in the fd_list is a sentinel!
   *
   * returns 1 on early exit (a callback returned -1).
   *
   * NOTE(review): `me` is declared PDUNUSED but referenced from the
   * PDWERR invocations; presumably PDWERR only expands its arguments
   * in debug builds where PDUNUSED keeps the name - confirm before
   * changing either macro.
   */
  static int backend_call_active_callbacks(struct fd_callback_box *fd_list,
                                           struct Backend_struct *PDUNUSED(me))
  {
    struct fd_callback_box *box;
    while((box = fd_list->next))
    {
      int fd = box->fd;
      ONERROR uwp;

      /* Unhook the box. */
      fd_list->next = box->next;
      box->next = NULL;
      /* If a callback throws, do_free_fd_box releases the list's ref. */
      SET_ONERROR(uwp, do_free_fd_box, box);

      PDWERR("[%d]BACKEND[%d]: Examining box for fd %d revents:0x%04x\n",
             THR_NO, me->id, fd, box->revents);

      if (box->fd < 0) {
        /* The box is no longer active.
         * Or we have found our sentinel fd_list.
         *
         * Note that the loop will terminate, since we
         * have broken the cycle above when we set
         * box->next to NULL.
         */
        CALL_AND_UNSET_ONERROR(uwp);
        continue;
      }

      /* From the roxen-chat re: connecttest.pike/FreeBSD:
       *
       * kqueue is returning the correct info:
       * {7,EVFILT_READ,EV_ADD|EV_EOF,61,0x0,0x0}.
       * errno 61 is ECONNREFUSED
       */
      /* Dispatch each event type the box both received and wants.
       * A callback returning -1 aborts the whole round. */
      if (box->revents & box->events & PIKE_BIT_FD_READ_OOB) {
        PDWERR("[%d]BACKEND[%d]: read_oob_callback(%d, %p)\n",
               THR_NO, me->id, fd, box->ref_obj);
        errno = 0;
        if (box->callback (box, PIKE_FD_READ_OOB) == -1) {
          CALL_AND_UNSET_ONERROR(uwp);
          goto backend_round_done;
        }
      }

      if (box->revents & box->events & PIKE_BIT_FD_READ) {
        /* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
         *        the kqueue case.
         */
        PDWERR("[%d]BACKEND[%d]: read_callback(%d, %p)\n",
               THR_NO, me->id, fd, box->ref_obj);
        errno = 0;
        if (box->callback (box, PIKE_FD_READ) == -1) {
          CALL_AND_UNSET_ONERROR(uwp);
          goto backend_round_done;
        }
      }

      if (box->revents & box->events & PIKE_BIT_FD_WRITE_OOB) {
        PDWERR("[%d]BACKEND[%d]: write_oob_callback(%d, %p)\n",
               THR_NO, me->id, fd, box->ref_obj);
        errno = 0;
        if (box->callback (box, PIKE_FD_WRITE_OOB) == -1) {
          CALL_AND_UNSET_ONERROR(uwp);
          goto backend_round_done;
        }
      }

      if (box->revents & box->events & PIKE_BIT_FD_WRITE) {
        /* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
         *        the kqueue case.
         */
        PDWERR("[%d]BACKEND[%d]: write_callback(%d, %p)\n",
               THR_NO, me->id, fd, box->ref_obj);
        errno = 0;
        if (box->callback (box, PIKE_FD_WRITE) == -1) {
          CALL_AND_UNSET_ONERROR(uwp);
          goto backend_round_done;
        }
      }


      if (box->revents & box->events & PIKE_BIT_FD_FS_EVENT) {
        /* FIXME: Consider utilizing ACTIVE_POLLSET[i].data in
         *        the kqueue case.
         */
        PDWERR("[%d]BACKEND[%d]: fs_event_callback(%d, %p)\n",
               THR_NO, me->id, fd, box->ref_obj);
        errno = 0;
        if (box->callback (box, PIKE_FD_FS_EVENT) == -1) {
          CALL_AND_UNSET_ONERROR(uwp);
          goto backend_round_done;
        }
      }

      if (box->revents & PIKE_BIT_FD_ERROR) {
        /* Error: fetch the pending socket error so the callback sees
         * it in errno, then stop monitoring everything but ERROR. */
        int old_events;
        int err;
        ACCEPT_SIZE_T len = sizeof (err);
        errno = 0;
        /* FIXME: This could be too late - the error might be
         * clobbered by the callbacks we might have called
         * above. */
        if (!getsockopt (fd, SOL_SOCKET, SO_ERROR, (void *) &err, &len)) {
          PDWERR("[%d]BACKEND[%d]: POLLERR on %d, error=%d\n",
                 THR_NO, me->id, fd, err);
          errno = err;
        }
        else {
          /* Note: This happens for FIFOs and PIPEs on Linux on the write-end
           *       if the read-end has been closed.
           */
#ifdef PIKE_DEBUG
#ifdef ENOTSOCK
          if (errno != ENOTSOCK) {
#endif
            WERR("Got POLLERR on non-socket fd %d (getsockopt errno=%d)\n",
                 fd, errno);
#ifdef ENOTSOCK
          } else {
            PDWERR("Got POLLERR on non-socket fd %d\n", fd);
          }
#endif
#endif /* PIKE_DEBUG */
          errno = err = EPIPE;
        }

        box->revents = 0;
        box->rflags = 0;

        /* We don't want to keep this fd anymore.
         * Note: This disables any further callbacks.
         */
        old_events = box->events;
        set_fd_callback_events(box, box->events & PIKE_BIT_FD_ERROR,
                               box->flags);
        if (WANT_EVENT (box, ERROR)) {
          PDWERR("[%d]BACKEND[%d]: error event on fd %d sent to %p\n",
                 THR_NO, me->id, fd, box->ref_obj);
          if (box->callback (box, PIKE_FD_ERROR) == -1) {
            CALL_AND_UNSET_ONERROR(uwp);
            goto backend_round_done;
          }
        }
        /* The following is temporary compat stuff. */
        /* kqueue TODO: Shouldn't need to do anything here for fs events,
         *              but should verify this.
         */
        else if (old_events & PIKE_BIT_FD_READ) {
          PDWERR("[%d]BACKEND[%d]: read_callback(%d, %p) for error %d\n",
                 THR_NO, me->id, fd, box->ref_obj, err);
          if (box->callback (box, PIKE_FD_READ) == -1) {
            CALL_AND_UNSET_ONERROR(uwp);
            goto backend_round_done;
          }
        } else if (old_events & PIKE_BIT_FD_WRITE) {
          PDWERR("[%d]BACKEND[%d]: write_callback(%d, %p) for error %d\n",
                 THR_NO, me->id, fd, box->ref_obj, err);
          if (box->callback (box, PIKE_FD_WRITE) == -1) {
            CALL_AND_UNSET_ONERROR(uwp);
            goto backend_round_done;
          }
        }
      }

      CALL_AND_UNSET_ONERROR(uwp);
    }
    return 0;

  backend_round_done:
    return 1;
  }
 
  /* Object storage initializer: zero out all backend state. The wakeup
   * pipe itself is created later in create(), once the subclass has
   * installed an update_fd_set_handler. */
  INIT
  {
    struct Backend_struct *me = THIS;

    me->id = unused_id++;

    PDWERR("[%d]BACKEND[%d]: init\n", THR_NO, me->id);

#ifdef _REENTRANT
    me->set_busy = 0;
    co_init(&me->set_change);
#endif /* _REENTRANT */
    me->exec_thread = 0;
#ifdef PIKE_THREADS
    co_init(&me->backend_signal);
    me->done_counter = 0;
#endif

    me->backend_callbacks.callbacks=0;
    me->backend_callbacks.num_calls=0;

    INVALIDATE_CURRENT_TIME(); /* Why? /mast */

    /* Call out bookkeeping starts empty. */
    me->num_pending_calls=0;
    me->call_heap = 0;
    me->call_heap_size = 0;
    me->hash_size=0;
    me->hash_order=5;
    me->call_hash=0;

    me->backend_obj = Pike_fp->current_object; /* Note: Not refcounted. */

#ifdef PIKE_DEBUG
    me->inside_call_out=0;
#endif

    /* fd box tables are allocated lazily by add_fd_box. */
    me->fd_boxes=0;
    me->fd_boxes_start = me->fd_boxes_size = 0;
    me->inactive_boxes = me->free_inactive_box = NULL;
    me->inactive_boxes_size = 0;

#ifdef PIKE_DEBUG
    me->debug_handler = NULL;
#endif
    me->update_fd_set_handler = NULL;
    me->handler_data = me;

    /* Note that we can't hook the wakeup pipe
     * until we are fully initialized.
     * The actual hooking of the wakeup pipe
     * is therefore done in create() below.
     */
    me->wakeup_pipe_send_fd = -1;
    INIT_FD_CALLBACK_BOX(&me->wakeup_cb_box, me, NULL, -1,
                         PIKE_BIT_FD_READ, wakeup_callback, 0);

    me->may_need_wakeup = 0;

#ifdef DO_PIKE_CLEANUP
    num_active_backends++;
#endif
  }
 
  /* Object storage destructor: close the wakeup pipe, unhook and free
   * all fd boxes, scrub the global fd map, and release all pending
   * call outs and their storage. */
  EXIT
    gc_trivial;
  {
    struct Backend_struct *me=THIS;
    int e;

    PDWERR("[%d]BACKEND[%d]: exit\n", THR_NO, me->id);

    free_callback_list(& THIS->backend_callbacks);

    if (THIS->wakeup_cb_box.fd >= 0)
      fd_close(THIS->wakeup_cb_box.fd);
    if (me->wakeup_pipe_send_fd >= 0)
      fd_close(THIS->wakeup_pipe_send_fd);

    if (me->fd_boxes) {
      FOR_EACH_ACTIVE_FD_BOX (me, box) {
        check_box (box, INT_MAX);

#ifdef PIKE_DEBUG
        if (get_backend_for_fd (box->fd) != me)
          Pike_fatal ("Inconsistency in global fd map for fd %d: "
                      "backend is %p, expected %p.\n",
                      box->fd, get_backend_for_fd (box->fd), me);
#endif

        /* Compat-interface boxes are owned by us and must be freed
         * here; normal boxes are only unhooked. */
        if (box->callback == compat_box_dispatcher) {
          DWERR("[%d]BACKEND[%d]: "
                "Compat callbacks left at exit for fd %d: 0x%x\n",
                THR_NO, me->id, box->fd, box->events);
          really_free_compat_cb_box ((struct compat_cb_box *) box);
        }

        unhook_fd_callback_box(box);
      }

      free(me->fd_boxes);
      me->fd_boxes = NULL;
      me->fd_boxes_start = me->fd_boxes_size = 0;
    }

    if (me->inactive_boxes) {
      FOR_EACH_INACTIVE_FD_BOX (me, box) {
        check_box (box, INT_MAX);
#ifdef PIKE_DEBUG
        if (box->callback == compat_box_dispatcher)
          Pike_fatal ("Got inactive callback in compat interface.\n");
#endif

        unhook_fd_callback_box(box);
      }

      free(me->inactive_boxes);
      me->inactive_boxes = me->free_inactive_box = NULL;
      me->inactive_boxes_size = 0;
    }

    /* Make sure we aren't referenced any more. */
    /* FIXME: Ought to keep better track of our fds so that we don't
     * need to do this loop. /mast */
    for (e = 0; e < fd_map_size; e++) {
      if (fd_map[e] == me) fd_map[e] = NULL;
    }

    /* CALL OUT */
    backend_verify_call_outs(me);
    for(e=0;e<me->num_pending_calls;e++)
    {
      CALL(e)->pos = -1;
      if (CALL(e)->this)
        free_object(CALL(e)->this);
    }
    me->num_pending_calls=0;
    if(me->call_heap) free(me->call_heap);
    me->call_heap = NULL;
    if(me->call_hash) free(me->call_hash);
    me->call_hash=NULL;

#ifdef PIKE_THREADS
    co_destroy(&me->backend_signal);
#endif
#ifdef _REENTRANT
    co_destroy(&me->set_change);
#endif

#ifdef DO_PIKE_CLEANUP
    if (!--num_active_backends) backend_cleanup();
#endif
  }
 
  /*! @decl void create()
   */
  PIKEFUN void create()
    flags ID_PROTECTED;
  {
    struct Backend_struct *me = THIS;

    /* The base class has no poll implementation; only subclasses that
     * have installed an update_fd_set_handler may be cloned. */
    if (!me->update_fd_set_handler) {
      Pike_error("Attempt to clone the base Backend class.\n");
    }

    /* Set up the wakeup pipe (deferred from INIT; see note there). */
    if (me->wakeup_pipe_send_fd < 0) {
      int pipe[2];
      PDWERR("[%d]BACKEND[%d]: Creating wakeup pipe...\n", THR_NO, me->id);
      if(pike_make_pipe(pipe) < 0)
        Pike_error("Couldn't create backend wakeup pipe! errno=%d.\n",errno);

      set_nonblocking(pipe[0],1);
      set_nonblocking(pipe[1],1);
      PDWERR("[%d]BACKEND[%d]: Initializing wakeup pipe...\n", THR_NO, me->id);
      /* The read end is monitored via wakeup_cb_box; the write end is
       * kept for backend_wake_up_backend. */
      change_fd_for_box (&me->wakeup_cb_box, pipe[0]);
      me->wakeup_pipe_send_fd = pipe[1];

      PDWERR("[%d]BACKEND[%d]: Wakeup pipe is [%d, %d]\n", THR_NO, me->id,
             me->wakeup_pipe_send_fd, THIS->wakeup_cb_box.fd);

      /* Don't keep these on exec! */
      set_close_on_exec(pipe[0], 1);
      set_close_on_exec(pipe[1], 1);
    }
  }
} 
 
/*! @endclass 
 */ 
 
/* 
 * POLL/SELECT selection 
 */ 
 
#ifndef HAVE_AND_USE_POLL 
/* Various BSDs have simulated poll(2) APIs. */ 
#undef HAVE_POLL 
#endif 
 
/* #undef BACKEND_USES_DEVPOLL */ 
/* #undef BACKEND_USES_DEVEPOLL */ 
/* #undef BACKEND_USES_POLL_DEVICE */ 
 
#ifdef HAVE_POLL 
 
/* 
 * Backends using poll(2) or similar. 
 */ 
 
/* Some constants... */ 
 
/* Notes on POLLRDNORM and POLLIN: 
 * 
 * According to the AIX manual, POLLIN and POLLRDNORM are both set 
 * if there's a nonpriority message on the read queue. POLLIN is 
 * also set if the message is of 0 length. 
 */ 
 
#ifndef POLLRDNORM 
#define POLLRDNORM      POLLIN 
#endif /* !POLLRDNORM */ 
 
#ifndef POLLRDBAND 
#define POLLRDBAND      POLLPRI 
#endif /* !POLLRDBAND */ 
 
#ifndef POLLWRNORM 
#define POLLWRNORM      POLLOUT 
#endif /* POLLWRNORM */ 
 
#ifndef POLLWRBAND 
#define POLLWRBAND      POLLOUT 
#endif /* !POLLWRBAND */ 
 
#define MY_POLLIN POLLRDNORM|POLLIN 
#define MY_POLLOUT POLLWRNORM|POLLOUT 
 
#define MY_POLLEXCEPT   POLLRDBAND|POLLRDNORM|POLLIN 
#define MY_POLLRDBAND   POLLRDBAND|POLLPRI 
#define MY_POLLWREXCEPT POLLWRBAND|POLLWRNORM|POLLOUT 
#define MY_POLLWRBAND   POLLWRBAND|MY_POLLOUT 
#define MY_POLLNVAL     POLLNVAL 
 
#if (POLLRDBAND != POLLRDNORM) && (POLLRDBAND != POLLIN) 
#define RDBAND_IS_SPECIAL 
#endif 
 
#if (POLLWRBAND != POLLOUT) && (POLLWRBAND != POLLWRNORM) 
#define WRBAND_IS_SPECIAL 
#endif 
 
#define TIMEOUT_IS_MILLISECONDS 
 
#ifdef BACKEND_USES_DEVPOLL 
/* 
 * Backend using /dev/poll-style poll device. 
 * 
 * Used on: 
 *   Solaris 7 + patches and above. 
 *   OSF/1 + patches and above. 
 */ 
 
#define POLL_EVENT      struct pollfd 
#define OPEN_POLL_DEVICE(X)     open(PIKE_POLL_DEVICE, O_RDWR) 
#define CHILD_NEEDS_TO_REOPEN 
 
#define DECLARE_POLL_EXTRAS             \ 
  POLL_EVENT poll_fds[POLL_SET_SIZE];   \ 
  struct dvpoll poll_request = {        \ 
    poll_fds,                           \ 
    POLL_SET_SIZE,                      \ 
    0,                                  \ 
  } 
 
#define PDB_POLL(PFD, TIMEOUT)                                  \ 
  ((poll_request.dp_timeout = (TIMEOUT)),                       \ 
   (ioctl(PFD, DP_POLL, &poll_request, sizeof(poll_request)))) 
 
/* Install the wanted poll events for fd in the /dev/poll device pfd.
 *
 * The fd's old state is first cleared with POLLREMOVE and then (unless
 * events is 0) re-added with the new event mask; /dev/poll requires
 * these to be separate requests.
 *
 * In multi-threaded mode the backend thread is woken up and parked in
 * wakeup_callback() while the poll set is rewritten, since the device
 * must not be polled and modified concurrently.
 *
 * Returns the number of bytes written to the poll device; does not
 * return on failure (Pike_fatal()).
 */
int POLL_DEVICE_SET_EVENTS(struct Backend_struct *me,
                           int pfd, int fd, INT32 events)
{
  struct pollfd poll_state[2];
  int e;
  int sz = sizeof(poll_state);

  PDWERR("POLL_DEVICE_SET_EVENTS(%p, %d, %d, 0x%08x)\n", me, pfd, fd, events);

  /* NOTE: POLLREMOVE must (unfortunately) be a separate request. */
  poll_state[0].fd = fd;
  poll_state[0].events = POLLREMOVE;
  poll_state[0].revents = 0;
  poll_state[1].fd = fd;
  poll_state[1].events = events;
  poll_state[1].revents = 0;

  if (!events) {
    /* We're not interested in the fd anymore;
     * only write the POLLREMOVE entry.
     */
    sz = sizeof(poll_state[0]);
  }

#ifdef _REENTRANT
  /* FIXME: Ought to check if we're the backend.
   */
  if(num_threads>1)
  {
    SWAP_OUT_CURRENT_THREAD();
    /* Release the poll set from the backend. */
    PDWERR("POLL_DEVICE_SET_EVENTS[%p] grabbing the poll set\n", me);
    while (me->set_busy) {
      co_wait_interpreter(&me->set_change);
    }
    me->set_busy = 1;
    PDWERR("POLL_DEVICE_SET_EVENTS[%p] wake up backend\n", me);
    backend_wake_up_backend(me);
    /* The backend is now waiting in wakeup_callback(). */
    SWAP_IN_CURRENT_THREAD();
  }
#endif /* _REENTRANT */

  PDWERR("POLL_DEVICE_SET_EVENTS[%p] updating the poll set\n", me);
  while (((e = write(pfd, poll_state, sz)) < 0) && (errno == EINTR))
    ;

#ifdef _REENTRANT
  /* Unconditional clear is harmless: set_busy is only ever set above. */
  me->set_busy = 0;
  if(num_threads>1)
  {
    /* Release the backend from wakeup_callback(). */
    PDWERR("POLL_DEVICE_SET_EVENTS[%p] releasing the backend\n", me);
    co_broadcast(&me->set_change);
  }
#endif /* _REENTRANT */

  if (e < 0) {
    Pike_fatal("Failed to set state for fd %d in " PIKE_POLL_DEVICE
               " (errno:%d).\n",
               fd, errno);
  }

  /* FIXME: Probably ought to support partial writes. */
  if (e != sz) {
    /* Report the actual expected size sz; it is smaller than
     * sizeof(poll_state) when only the POLLREMOVE entry was written,
     * so printing sizeof(poll_state) here would be misleading.
     */
    Pike_fatal("Failed to set state for fd %d in " PIKE_POLL_DEVICE
               " short write (%d != %d).\n",
               fd, e, sz);
  }
  PDWERR("POLL_DEVICE_SET_EVENTS[%p] ==> %d\n", me, e);
  return e;
}
 
#elif defined(BACKEND_USES_DEVEPOLL) 
/* 
 * Backend using /dev/epoll-style poll device. 
 * 
 * Used on: 
 *   Linux 2.6 and above. 
 * Note: 
 *   Some libc's are missing wrappers for the system calls, so 
 *   we include the appropriate wrappers below. 
 */ 
 
#ifndef PIKE_POLL_DEVICE 
#define PIKE_POLL_DEVICE        "epoll" 
#endif 
 
/* The following three are defined by <gnu/stubs.h> which is included 
 * from <features.h> which is included from just about everywhere, so 
 * it is safe to assume that they have been defined if appropriate. 
 */ 
#if defined(__stub_epoll_create) || defined(__stub_epoll_ctl) || \ 
    defined(__stub_epoll_wait) 
/* We have a libc without the wrappers for epoll support. 
 */ 
#ifndef __NR_epoll_create 
/* Our libc doesn't even know the syscall numbers for the epoll syscalls. 
 */ 
#ifdef __i386__ 
#define __NR_epoll_create 254 
#define __NR_epoll_ctl 255 
#define __NR_epoll_wait 256 
#elif defined(__x86_64__) 
#define __NR_epoll_create 214 
#define __NR_epoll_ctl 233 
#define __NR_epoll_wait 232 
#else /* cpu types */ 
#error Syscall numbers for epoll_create et al not known on this architecture. 
#endif /* cpu types */ 
#endif /* !defined(__NR_epoll_create) */ 
#if defined(_syscall1) && defined(_syscall4) 
_syscall1(int, epoll_create, int, size); 
_syscall4(int, epoll_ctl, int, epfd, int, op, int, fd, 
          struct epoll_event *, event); 
_syscall4(int, epoll_wait, int, epfd, struct epoll_event *, pevents, 
          int, maxevents, int, timeout); 
#undef __stub_epoll_create 
#undef __stub_epoll_ctl 
#undef __stub_epoll_wait 
#else /* !_syscall1 || !_syscall4 */ 
#error Missing macros for generation of syscall wrappers. 
#endif /* _syscall1 && _syscall4 */ 
#endif /* __stub_epoll_{create, ctl, wait} */ 
 
#define POLL_EVENT      struct epoll_event 
#define PDB_GET_FD(EVENT)       EVENT.data.fd 
#define PDB_GET_EVENTS(EVENT)   EVENT.events 
 
/* FIXME: Might want another value instead on POLL_SET_SIZE. */ 
#define OPEN_POLL_DEVICE(X)     epoll_create(POLL_SET_SIZE) 
 
#define DECLARE_POLL_EXTRAS             \ 
  POLL_EVENT poll_fds[POLL_SET_SIZE] 
 
#define PDB_POLL(PFD, TIMEOUT)                          \ 
  epoll_wait(PFD, poll_fds, POLL_SET_SIZE, TIMEOUT) 
 
/* Retry an epoll_ctl() operation until it completes without being
 * interrupted by a signal.
 */
static int pdb_epoll_op(int pfd, int op, int fd, struct epoll_event *ev)
{
  int res;
  do {
    res = epoll_ctl(pfd, op, fd, ev);
  } while ((res < 0) && (errno == EINTR));
  return res;
}

/* Install the epoll event mask events for fd in the epoll set pfd.
 * A zero mask removes the fd from the set. Returns the epoll_ctl()
 * result (0 on success, -1 with errno set on failure).
 */
int POLL_DEVICE_SET_EVENTS(struct Backend_struct *UNUSED(me),
                           int pfd, int fd, INT32 events)
{
  int e;

  if (!events) {
    /* The last argument must be a proper struct pointer even
     * though it isn't used...
     */
    struct epoll_event dummy;
    PIKE_MEM_RW (dummy);
    PDWERR("epoll_ctl(%d, EPOLL_CTL_DEL, %d, &dummy)\n", pfd, fd);
    e = pdb_epoll_op(pfd, EPOLL_CTL_DEL, fd, &dummy);
    /* Already gone; that is what we wanted anyway. */
    if ((e < 0) && (errno == ENOENT)) return 0;
  } else {
    struct epoll_event ev;
#ifdef __CHECKER__
    memset(&ev, 0, sizeof(ev));
#endif
    ev.events = events;
    ev.data.fd = fd;

    /* To avoid valgrind complaints when fd doesn't fill up the
     * ev.data union. */
    PIKE_MEM_RW (ev.data);

    /* Try modifying first; fall back to adding if the fd was not
     * yet a member of the epoll set (the interface exposes kernel
     * implementation details here).
     */
    PDWERR("epoll_ctl(%d, EPOLL_CTL_MOD, %d, { 0x%08x, %d })\n",
           pfd, fd, events, fd);
    e = pdb_epoll_op(pfd, EPOLL_CTL_MOD, fd, &ev);
    if ((e < 0) && (errno == ENOENT)) {
      PDWERR("epoll_ctl(%d, EPOLL_CTL_ADD, %d, { 0x%08x, %d })\n",
             pfd, fd, events, fd);
      e = pdb_epoll_op(pfd, EPOLL_CTL_ADD, fd, &ev);
    }
  }

  if (e < 0) {
    PDWERR("epoll_ctl() failed with errno: %d\n", errno);
  }

  return e;
}
 
#endif /* BACKEND_USES_DEVPOLL || BACKEND_USES_DEVEPOLL */
 
#ifdef HAVE_POLL 
 
/* 
 * Backend using poll(2). 
 * 
 * This is used on most older SVR4- or POSIX-style systems. 
 */ 
 
#define PB_POLL(SET, TIMEOUT)                           \ 
  poll((SET).poll_fds, (SET).num_in_poll, (TIMEOUT)) 
 
/* Dynamically grown array of pollfd entries for the poll(2) backend. */
struct pb_selectors
{
  struct pollfd *poll_fds;      /* Array of watched fds (may be NULL). */
  int poll_fd_size;             /* Allocated size of poll_fds. */
  int num_in_poll;              /* Number of entries currently in use. */
};
 
/* Add the event bits in add for fd to the poll set, creating a new
 * pollfd entry if the fd is not already watched. Does not return on
 * allocation failure (Pike_fatal()).
 */
static void pb_MY_FD_SET(struct pb_selectors *me, int fd, int add)
{
  int idx;
  struct pollfd *entry;

  PDWERR("BACKEND: MY_FD_SET(%d, 0x%04x)\n", fd, add);

  /* Already watching this fd? Just merge in the new event bits. */
  for (idx = 0; idx < me->num_in_poll; idx++) {
    if (me->poll_fds[idx].fd == fd) {
      me->poll_fds[idx].events |= add;
      return;
    }
  }

  /* Not present; append a new entry, growing the array if needed. */
  me->num_in_poll++;
  if (me->num_in_poll > me->poll_fd_size) {
    me->poll_fd_size += me->num_in_poll;        /* Usually a doubling */
    /* NB: realloc(NULL, n) is equivalent to malloc(n). */
    me->poll_fds =
      realloc(me->poll_fds, sizeof(struct pollfd)*me->poll_fd_size);
    if (!me->poll_fds) {
      Pike_fatal("Out of memory in backend::MY_FD_SET()\n"
            "Tried to allocate %d pollfds\n", me->poll_fd_size);
    }
  }

  entry = me->poll_fds + me->num_in_poll - 1;
  entry->fd = fd;
  entry->events = add;
}
 
/* Remove the event bits in sub for fd from the poll set. When no events
 * remain for the fd, its entry is dropped by swapping in the last entry
 * of the array.
 */
static void pb_MY_FD_CLR(struct pb_selectors *me, int fd, int sub)
{
  int idx;

  PDWERR("BACKEND: POLL_FD_CLR(%d, 0x%04x)\n", fd, sub);

  if (!me->poll_fds) return;

  for (idx = 0; idx < me->num_in_poll; idx++) {
    if (me->poll_fds[idx].fd != fd) continue;

    me->poll_fds[idx].events &= ~sub;
    if (!me->poll_fds[idx].events) {
      /* Decrement num_in_poll first to keep the indexing simple
       * (avoids a lot of -1's below). /grubba
       */
      me->num_in_poll--;
      if (idx != me->num_in_poll)
        me->poll_fds[idx] = me->poll_fds[me->num_in_poll];
      /* Might want to shrink poll_fds here, but probably not. */
    }
    break;
  }
}
 
 
/* Copy the active entries of the poll set from into to, growing to's
 * array when needed. Does not return on allocation failure.
 */
static void pb_copy_selectors(struct pb_selectors *to,
                              struct pb_selectors *from)
{
  PDWERR("BACKEND: copy_poll_set() from->num_in_poll=%d\n", from->num_in_poll);

  /* Grow the destination array if it cannot hold the source entries. */
  if (to->poll_fd_size < from->num_in_poll) {
    PDWERR("BACKEND: copy_poll_set() size %d -> %d\n",
           to->poll_fd_size, from->poll_fd_size);
    to->poll_fd_size = from->poll_fd_size;
    /* NB: realloc(NULL, n) is equivalent to malloc(n). */
    to->poll_fds =
      realloc(to->poll_fds, sizeof(struct pollfd)*to->poll_fd_size);
    if (!to->poll_fds) {
      Pike_fatal("Out of memory in backend::copy_poll_set()\n"
            "Tried to allocate %d pollfds\n", to->poll_fd_size);
    }
  }

  memcpy(to->poll_fds, from->poll_fds,
         sizeof(struct pollfd)*from->num_in_poll);
  to->num_in_poll = from->num_in_poll;
}
 
#endif /* HAVE_POLL */ 
 
#define PB_GET_FD(EVENT)        EVENT.fd 
#ifndef PDB_GET_FD 
#define PDB_GET_FD(EVENT)       PB_GET_FD(EVENT) 
#endif 
#define PB_GET_EVENTS(EVENT)    EVENT.revents 
#ifndef PDB_GET_EVENTS 
#define PDB_GET_EVENTS(EVENT)   PB_GET_EVENTS(EVENT) 
#endif 
#ifndef PDB_GET_FLAGS 
#define PDB_GET_FLAGS(EVENT)    0 
#endif 
 
#elif defined(BACKEND_USES_KQUEUE) 
/* 
 * Backend using kqueue-style poll device. 
 * 
 * FIXME: Not fully implemented yet! Out of band data handling is missing. 
 * 
 * Used on 
 *   FreeBSD 4.1 and above. 
 *   MacOS X/Darwin 7.x and above. 
 *   Various other BSDs. 
 */ 
 
 
#define POLL_EVENT      struct kevent 
 
#ifdef BACKEND_USES_CFRUNLOOP 
#define OPEN_POLL_DEVICE(X)     my_kqueue(X) 
#else 
#define OPEN_POLL_DEVICE(X)     kqueue() 
#endif 
 
#define CHILD_NEEDS_TO_REOPEN 
 
#define PIKE_POLL_DEVICE        "kqueue" 
 
#define TIMEOUT_IS_TIMESPEC 
 
#define MY_POLLIN       EVFILT_READ 
#define MY_POLLOUT      EVFILT_WRITE 
 
/* NOTE: The following 4 event types are specific to kqueue(2) */ 
#define MY_POLLFSEVENT  EVFILT_VNODE 
#define MY_POLLPROCESS  EVFILT_PROC 
#define MY_POLLSIGNAL   EVFILT_SIGNAL 
#define MY_POLLTIMER    EVFILT_TIMER 
 
#define MY_POLLERR      EV_ERROR 
#if 0 
#define MY_POLLHUP      EV_EOF 
#else /* !0 */ 
#define MY_POLLHUP      0 
#endif /* 0 */ 
 
#ifdef __APPLE__ 
/* Not defined in old MacOS X. */ 
#ifndef EV_OOBAND 
#define EV_OOBAND       EV_FLAG1 
#endif 
#elif !defined(EV_OOBAND) 
/* FreeBSD seems to handle OOB data inline with ordinary data. */ 
#define EV_OOBAND       0 
#endif /* __APPLE__ */ 
 
/* FIXME: The kqueue API has no documented support for out of band data. */ 
#define MY_POLLEXCEPT   EVFILT_READ 
#define MY_POLLRDBAND   EVFILT_READ 
#define MY_POLLWREXCEPT EVFILT_WRITE 
#define MY_POLLWRBAND   EVFILT_WRITE 
 
#define DECLARE_POLL_EXTRAS             \ 
  POLL_EVENT poll_fds[POLL_SET_SIZE] 
 
#define PDB_POLL(SET, TIMEOUT)                                  \ 
  kevent((SET), NULL, 0, poll_fds, POLL_SET_SIZE, &(TIMEOUT)) 
 
#define PDB_GET_FD(EVENT)               EVENT.ident 
#define PDB_GET_EVENTS(EVENT)           EVENT.filter 
#define PDB_GET_FLAGS(EVENT)    EVENT.fflags 
#define PDB_CHECK_EVENT(EVENT, MASK)    (PDB_GET_EVENTS(EVENT) == (MASK)) 
 
/* NOTE: Error events are signalled in the flags field. They thus 
 *       must be checked for before the ordinary events. 
 */ 
#define PDB_CHECK_ERROR_EVENT(EVENT, MASK)      (EVENT.flags & (MASK)) 
 
/* Disable the given kqueue filter for fd in the kqueue *pfd.
 * Returns the kevent() result (-1 with errno set on error).
 */
int pdb_MY_FD_CLR(int *pfd, int fd, int filter)
{
  struct kevent ev;

  /* Note: Use EV_DISABLE in preference to EV_DELETE, since
   *       odds are that the fd will be reenabled, and the
   *       filter is deleted anyway when the fd is closed.
   */
  EV_SET(&ev, fd, filter, EV_DISABLE, 0, 0, 0);

  return kevent(*pfd, &ev, 1, NULL, 0, NULL);
}

#define pdb_MY_FD_SET(PFD, FD, FILTER) pdb_MY_FD_SET2(PFD, FD, FILTER, 0)

/* Add (or re-enable) the given kqueue filter for fd in the kqueue *pfd,
 * with filter-specific flags in fflags (used by EVFILT_VNODE).
 * Returns the kevent() result (-1 with errno set on error).
 */
int pdb_MY_FD_SET2(int *pfd, int fd, int filter, int fflags)
{
  struct kevent ev[2];

 /* VNODE filters seem to need EV_CLEAR (edge-triggered) mode, else they
  * just repeat endlessly. */
 if(filter == EVFILT_VNODE)
   EV_SET(ev, fd, filter, EV_ADD|EV_ENABLE|EV_CLEAR, fflags, 0, 0);
 else
   EV_SET(ev, fd, filter, EV_ADD|EV_ENABLE, fflags, 0, 0);

  return kevent(*pfd, ev, 1, NULL, 0, NULL);
}

/* The kqueue API has no separate out-of-band band; these are no-ops
 * in the kqueue case (cf the FIXME above about OOB data).
 */
#define pdb_MY_FD_CLR_RDBAND(SET, FD)
#define pdb_MY_FD_CLR_WRBAND(SET, FD)
 
#endif 
 
/* 
 * Backend using select(2) 
 * 
 * This is used on most older BSD-style systems, and WIN32. 
 */ 
 
#define MY_READSET      0 
#define MY_WRITESET     1 
#define MY_EXCEPTSET    2 
/* except == incoming OOB data (or error according to POSIX) 
 * outgoing OOB data is multiplexed on write 
 */ 
 
/* select(2)-style fd sets, indexed by MY_READSET/MY_WRITESET/MY_EXCEPTSET. */
struct sb_selectors
{
  int max_fd;           /* Highest fd present in any of the three sets. */
  my_fd_set sets[3];
};

/* Snapshot of sb_selectors in plain fd_set form, as consumed (and
 * destructively modified) by select(2)/fd_select().
 */
struct sb_active_selectors
{
  fd_set asets[3];
  int max_fd;
};
 
#define SB_SELECT(SET, TIMEOUT)                                 \ 
  fd_select((SET).max_fd + 1,                                   \ 
            (SET).asets + MY_READSET,                           \ 
            (SET).asets + MY_WRITESET,                          \ 
            (SET).asets + MY_EXCEPTSET,                         \ 
            (TIMEOUT).tv_sec >= 100000000 ? NULL : &(TIMEOUT)) 
 
/* Remove fd from set setno. When the removed fd was the highest one,
 * max_fd is rescanned downwards to the highest fd still present in any
 * of the three sets.
 */
void sb_MY_FD_CLR(struct sb_selectors *me, int fd, int setno)
{
  if(fd > me->max_fd) return;
  my_FD_CLR(fd, me->sets + setno);
  if(fd == me->max_fd)
  {
    /* Scan downwards for the new highest watched fd (may reach -1
     * when all sets are empty).
     */
    while(me->max_fd >=0 &&
          !my_FD_ISSET(me->max_fd, me->sets + MY_READSET) &&
          !my_FD_ISSET(me->max_fd, me->sets + MY_WRITESET)
          && !my_FD_ISSET(me->max_fd, me->sets + MY_EXCEPTSET)
      )
      me->max_fd--;
  }
}
 
/* Add fd to set setno, keeping max_fd up to date (select() needs the
 * highest watched fd plus one).
 */
void sb_MY_FD_SET(struct sb_selectors *me, int fd, int setno)
{
  if (me->max_fd < fd) {
    me->max_fd = fd;
  }
  my_FD_SET(fd, me->sets + setno);
}
 
#ifndef HAVE_POLL
/* Copy the three fd sets from their my_fd_set representation into the
 * plain fd_set snapshot that is handed to (and clobbered by) fd_select().
 */
static void sb_copy_selectors(struct sb_active_selectors *to,
                              struct sb_selectors *from)
{
  fd_copy_my_fd_set_to_fd_set(to->asets + MY_READSET,
                              from->sets + MY_READSET, from->max_fd+1);
  fd_copy_my_fd_set_to_fd_set(to->asets + MY_WRITESET,
                              from->sets + MY_WRITESET, from->max_fd+1);
  fd_copy_my_fd_set_to_fd_set(to->asets + MY_EXCEPTSET,
                              from->sets + MY_EXCEPTSET, from->max_fd+1);
  to->max_fd=from->max_fd;
}
#endif
 
#ifndef POLL_SET_SIZE 
#define POLL_SET_SIZE           32 
#endif /* !POLL_SET_SIZE */ 
 
#define PB_CHECK_EVENT(EVENT, MASK)     (PB_GET_EVENTS(EVENT) & (MASK)) 
#ifndef PDB_CHECK_EVENT 
#define PDB_CHECK_EVENT(EVENT, MASK)    (PDB_GET_EVENTS(EVENT) & (MASK)) 
#endif /* PB_CHECK_EVENT */ 
 
#define PB_CHECK_ERROR_EVENT(EVENT, MASK)       PB_CHECK_EVENT(EVENT, MASK) 
#ifndef PDB_CHECK_ERROR_EVENT 
#define PDB_CHECK_ERROR_EVENT(EVENT, MASK)      PDB_CHECK_EVENT(EVENT, MASK) 
#endif /* PB_CHECK_ERROR_EVENT */ 
 
#ifdef RDBAND_IS_SPECIAL 
#  define pb_MY_FD_CLR_RDBAND(SET, FD) pb_MY_FD_CLR (SET, FD, MY_POLLRDBAND) 
#else 
#  define pb_MY_FD_CLR_RDBAND(SET, FD) 
#endif 
 
#ifdef WRBAND_IS_SPECIAL 
#  define pb_MY_FD_CLR_WRBAND(SET, FD) pb_MY_FD_CLR (SET, FD, MY_POLLWRBAND) 
#else 
#  define pb_MY_FD_CLR_WRBAND(SET, FD) 
#endif 
 
#ifndef MY_POLLERR 
#define MY_POLLERR      POLLERR 
#endif 
 
#ifndef MY_POLLHUP 
#define MY_POLLHUP      POLLHUP 
#endif 
 
#ifndef MY_POLLFSEVENT 
#define MY_POLLFSEVENT  0 
#endif 
 
#ifndef MY_POLLSIGNAL 
#define MY_POLLSIGNAL   0 
#endif 
 
#if defined(BACKEND_USES_POLL_DEVICE) || defined(BACKEND_USES_KQUEUE) 
 
/*! @class PollDeviceBackend 
 *! @inherit __Backend 
 *! 
 *! @[Backend] implemented with @tt{/dev/poll@} (Solaris and OSF/1), 
 *! @tt{epoll(2)@} (Linux) or @tt{kqueue(2)@} (MacOS X, FreeBSD, OpenBSD, etc). 
 *! 
 *! @seealso 
 *!   @[Backend] 
 */ 
PIKECLASS PollDeviceBackend 
{ 
  INHERIT Backend; 
 
  /* Helpers to find the above inherit. */ 
  static ptrdiff_t pdb_offset = 0; 
  CVAR struct Backend_struct *backend; 
 
  /* 
   * POLL/SELECT fd sets 
   */ 
  CVAR int set; 
#ifdef BACKEND_USES_CFRUNLOOP 
  CVAR int go_cf; 
  CVAR int external_run; 
  CVAR CFFileDescriptorRef fdref; 
  CVAR CFRunLoopSourceRef source; 
  CVAR CFRunLoopObserverRef beObserver; 
  CVAR CFRunLoopTimerRef beTimer; 
  CVAR int event_count; 
  CVAR int gil_released; 
  CVAR struct thread_state * thread_state; /* used by external runloops. */ 
#endif /* BACKEND_USES_CFRUNLOOP */ 
#ifdef DECLARE_POLL_EXTRAS 
    /* Declare any extra variables needed by MY_POLL(). */ 
  CVAR struct kevent* poll_fds; 
#endif /* DECLARE_POLL_EXTRAS */ 
 
  DECLARE_STORAGE 
 
  /*! @decl optional void set_signal_event_callback(int signum, function cb) 
   *! 
   *! Request @[cb] to be called from the backend when the signal 
   *! @[signum] is received. 
   *! 
   *! @note 
   *!   This function is a noop except for the @tt{kqueue@} case. 
   *! 
   *! @note 
   *!   Caveat emptor: Unlikely to work. 
   *! 
   *! @seealso 
   *!   @[signal()] 
   */ 
  PIKEFUN void set_signal_event_callback(int signum, function cb)
    flags ID_OPTIONAL;
  {
    int q;
#ifdef BACKEND_USES_KQUEUE
    struct kevent ev[2];
    /* Register an EVFILT_SIGNAL filter for signum on this backend's
     * kqueue. NB: the kevent() result in q is currently ignored, and
     * cb itself is never stored anywhere (see FIXME below), so the
     * callback cannot actually be invoked yet.
     */
    EV_SET(ev, signum, MY_POLLSIGNAL, EV_ADD, 0, 0, 0);
    q = kevent(THIS->set, ev, 1, NULL, 0, NULL);

    /* FIXME: Shouldn't cb be registered somewhere? */
#endif
  }
 
  /* 
   * FD set handling 
   */ 
 
  /* Install the PIKE_BIT_FD_* event mask wanted_events for fd in this
   * backend's poll device or kqueue.
   *
   * In the kqueue case this is only used by pdb_REOPEN_POLL_DEVICE()
   * on a freshly opened kqueue, so plain EV_ADD requests suffice.
   */
  static void pdb_UPDATE_BLACK_BOX(struct PollDeviceBackend_struct *me, int fd,
                                   int wanted_events)
  {
#ifdef BACKEND_USES_POLL_DEVICE
    INT32 events = 0;

    /* Translate the backend event bits into poll-style event bits. */
    if (wanted_events & PIKE_BIT_FD_READ) {
      events |= MY_POLLIN;
    }
    if (wanted_events & PIKE_BIT_FD_WRITE) {
      events |= MY_POLLOUT;
    }
    if (wanted_events & PIKE_BIT_FD_READ_OOB) {
      events |= MY_POLLRDBAND;
    }
    if (wanted_events & PIKE_BIT_FD_WRITE_OOB) {
      events |= MY_POLLWRBAND;
    }
    if (wanted_events & PIKE_BIT_FD_FS_EVENT) {
      events |= MY_POLLFSEVENT;
    }

    PDWERR("UPDATE_BLACK_BOX(%d, %d) ==> events: 0x%08x\n",
           me->set, fd, events);
    POLL_DEVICE_SET_EVENTS(me->backend, me->set, fd, events);
#elif defined(BACKEND_USES_KQUEUE)
    /* Note: Only used by REOPEN_POLL_DEVICE on a freshly opened kqueue. */
    struct kevent ev[3];
    int nev = 0;
    if (wanted_events & (PIKE_BIT_FD_READ|PIKE_BIT_FD_READ_OOB)) {
      if (wanted_events & PIKE_BIT_FD_READ_OOB) {
        /* We want OOB notifications.
         *
         * Without setting EV_OOBAND, we won't get any OOB notifications,
         * unless there is also in-band data.
         */
        PDWERR("BACKEND: EV_SET fd:%d, READ, ADD, [OOBAND]\n", fd);
        EV_SET(ev, fd, MY_POLLIN, EV_ADD|EV_OOBAND, 0, 0, 0);
      } else {
        PDWERR("BACKEND: EV_SET fd:%d, READ, ADD\n", fd);
        EV_SET(ev, fd, MY_POLLIN, EV_ADD, 0, 0, 0);
      }
      nev++;
    }
    if (wanted_events & PIKE_BIT_FD_WRITE ||
        wanted_events & PIKE_BIT_FD_WRITE_OOB) {
      /* Outgoing OOB is multiplexed onto the write filter. */
      PDWERR("BACKEND: EV_SET fd:%d, WRITE, ADD\n", fd);
      EV_SET(ev+nev, fd, MY_POLLOUT, EV_ADD, 0, 0, 0);
      nev++;
    }
    if (wanted_events & PIKE_BIT_FD_FS_EVENT) {
      /* kqueue TODO generate fflags from the high bits of
       *             the wanted_events argument.
       */
      EV_SET(ev+nev, fd, MY_POLLFSEVENT, EV_ADD, 0, 0, 0);
      nev++;
    }
    /* Submit all changes in a single kevent() call. */
    if (nev)
      kevent(me->set, ev, nev, NULL, 0, NULL);
#endif /* BACKEND_USES_POLL_DEVICE */
  }
 
 
#ifdef BACKEND_USES_CFRUNLOOP 
  /* We place these declarations here rather than at the top 
   * in order to avoid struct-y unpleasantness 
   */ 
  static int init_cf(struct PollDeviceBackend_struct *me, int i); 
  static int init_external_cfrl(struct PollDeviceBackend_struct *me, int i); 
  static int low_my_kqueue(struct PollDeviceBackend_struct *me); 
 
  /* Thin wrapper around low_my_kqueue(); kept as the OPEN_POLL_DEVICE()
   * entry point for the CFRunLoop case.
   */
  static int my_kqueue(struct PollDeviceBackend_struct *me)
  {
     return low_my_kqueue(me);
  }
 
  /* Open a fresh kqueue. When CFRunLoop integration is enabled (go_cf),
   * also hook the kqueue fd into the run loop machinery via init_cf().
   * Returns the kqueue fd.
   */
  static int low_my_kqueue(struct PollDeviceBackend_struct *me)
  {
    int kq = kqueue();

    return me->go_cf ? init_cf(me, kq) : kq;
  }
 
  /* arg i is the kqueue. */ 
  static int init_external_cfrl(struct PollDeviceBackend_struct *me, 
                                int UNUSED(i)) 
  { 
    /* we assume that init_cf() has already been called. */ 
 
    CFRunLoopObserverRef beObserver = NULL; 
 
    int myActivities = kCFRunLoopBeforeWaiting | kCFRunLoopAfterWaiting | 
      kCFRunLoopBeforeTimers | kCFRunLoopBeforeSources | kCFRunLoopExit; 
 
    CFRunLoopObserverContext context = {0, me, NULL, NULL, NULL}; 
 
    beObserver = CFRunLoopObserverCreate(NULL, myActivities, 1, 
            /* repeat */ 1, cfObserverCallback, &context); 
 
    if (beObserver) 
    { 
      CFRetain(beObserver); 
      CFRunLoopAddObserver(CFRunLoopGetCurrent(), beObserver, 
        kCFRunLoopDefaultMode); 
    } 
 
    return 0; 
  } 
 
  /* arg i is the kqueue. */ 
  /* arg i is the kqueue. */
  /* Undo init_external_cfrl(): remove and release the run loop observer
   * stored on the backend, and release the backend timer if one was
   * created. Always returns 0.
   */
  static int exit_external_cfrl(struct PollDeviceBackend_struct *me)
  {
    /* we assume that init_cf() has already been called. */
    CFRunLoopObserverRef beObserver = me->beObserver;
    CFRunLoopTimerRef beTimer = me->beTimer;
    if (beObserver)
    {
      CFRunLoopRemoveObserver(CFRunLoopGetCurrent(), beObserver,
        kCFRunLoopDefaultMode);
      CFRelease(beObserver);
    }
    if(beTimer)
    {
      CFRelease(beTimer);
    }

    return 0;
  }
 
  /* arg i is the kqueue. */ 
  /* arg i is the kqueue. */
  /* Wrap the kqueue fd i in a CFFileDescriptor plus a run loop source so
   * that a CFRunLoop can watch it; read callbacks are delivered to
   * noteEvents(). Returns i unchanged.
   */
  static int init_cf(struct PollDeviceBackend_struct *me, int i)
  {
    CFFileDescriptorContext context = {0, me, NULL, NULL, NULL};

    me->fdref = CFFileDescriptorCreate(kCFAllocatorDefault, i, true,
                                       noteEvents, &context);
    CFRetain(me->fdref);

    me->source = CFFileDescriptorCreateRunLoopSource(kCFAllocatorDefault,
                                                     me->fdref, 0);
    CFFileDescriptorEnableCallBacks(me->fdref, kCFFileDescriptorReadCallBack);
    CFRetain(me->source);

    return i;
  }
 
  /* Undo init_cf(): invalidate and release the run loop source, and
   * release the CFFileDescriptor wrapping the kqueue fd.
   */
  static void exit_cf(struct PollDeviceBackend_struct *me)
  {
    if(me->source)
    {
      CFRunLoopSourceInvalidate(me->source);
      CFRelease(me->source);
    }

    if(me->fdref)
      CFRelease(me->fdref);
  }
 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
  /* This is called in the child process to restore 
   * poll state after fork() in case of detaching. 
   */ 
  /* This is called in the child process to restore
   * poll state after fork() in case of detaching.
   *
   * The old poll device fd is closed, a fresh one is opened and moved
   * (via dup2()) to the old fd number when possible, and the poll state
   * of every active fd is re-registered on the new device. Does not
   * return on failure to reopen (Pike_fatal()).
   */
  static void pdb_REOPEN_POLL_DEVICE(struct PollDeviceBackend_struct *me)
  {
    int fd;

    while ((close(me->set) < 0) && (errno == EINTR))
      ;
    while (((fd = OPEN_POLL_DEVICE(me)) < 0) && (errno == EINTR))
      ;
    if (fd < 0) {
      Pike_fatal("Failed to reopen " PIKE_POLL_DEVICE
                 " after fork (errno: %d).\n", errno);
    }
    if (fd != me->set) {
      /* Try to keep the old fd number so that other references to
       * me->set stay valid.
       */
      int e;
      while (((e = dup2(fd, me->set)) < 0) && (errno == EINTR))
        ;
      if (e < 0) {
        /* We hope we can use the fd at the new location... */
        me->set = fd;
      } else {
        while ((close(fd) < 0) && (errno == EINTR))
          ;
      }
    }
    set_close_on_exec(me->set, 1);

    /* Restore the poll-state for all the fds. */
    {FOR_EACH_ACTIVE_FD_BOX (me->backend, box) {
        pdb_UPDATE_BLACK_BOX (me, box->fd, box->events);
      }}

  }
 
#ifdef BACKEND_USES_CFRUNLOOP 
  /* CFFileDescriptor callback: the kqueue fd became readable. Drain the
   * pending kevents without blocking (zero timeout) into this backend's
   * poll_fds buffer and record how many were received in event_count
   * (0 on kevent() failure).
   */
  static void noteEvents(CFFileDescriptorRef fdref,
                         CFOptionFlags UNUSED(callBackTypes),
                         void *info)
  {
    struct kevent kev;
    struct timespec tv;
    struct PollDeviceBackend_struct * this_backend;
    int fd;
    int numevts;

    tv.tv_sec = 0;
    tv.tv_nsec = 0;
    this_backend = (struct PollDeviceBackend_struct *)info;
    fd = CFFileDescriptorGetNativeDescriptor(fdref);
    numevts = kevent(fd, NULL, 0, this_backend->poll_fds, POLL_SET_SIZE, &tv);
    if (numevts >= 0)
      this_backend->event_count = numevts;
    else
      this_backend->event_count = 0;
  }
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
  /* Registry of all live PollDeviceBackend objects, used to reopen their
   * poll devices in the child after fork() (cf reopen_all_pdb_backends()).
   */
  static struct PollDeviceBackend_struct **pdb_backends = NULL;
  static int num_pdb_backends = 0;      /* Number of registered backends. */
  static int pdb_backends_size = 0;     /* Allocated size of pdb_backends. */
 
  /* Called from the init callback. */ 
  static void register_pdb_backend(struct PollDeviceBackend_struct *me) 
  { 
    if (num_pdb_backends == pdb_backends_size) { 
      struct PollDeviceBackend_struct **new_backends = 
        xrealloc(pdb_backends, 
                 (pdb_backends_size+1) * 
                 sizeof(struct PollDeviceBackend_struct *)*2); 
      pdb_backends = new_backends; 
      pdb_backends_size = (pdb_backends_size+1)*2; 
    } 
    pdb_backends[num_pdb_backends++] = me; 
  } 
 
  /* Called from the exit callback. */ 
  /* Called from the exit callback. */
  /* Remove me from the pdb_backends registry by swapping in the last
   * entry; a no-op if me was never registered.
   */
  static void unregister_pdb_backend(struct PollDeviceBackend_struct *me)
  {
    int i;
    /* Search backwards since new backends are more likely to be destructed
     * than old backends.
     */
    for (i = num_pdb_backends - 1; i >= 0; i--) {
      if (pdb_backends[i] != me) continue;
      pdb_backends[i] = pdb_backends[--num_pdb_backends];
      pdb_backends[num_pdb_backends] = NULL;
      return; /* A backend is only supposed to be registered once. */
    }
  }
 
  /* Called in the child after fork(). */ 
  /* Called in the child after fork(). */
  /* Re-create the poll device of every registered backend, since the
   * inherited device fds must not be shared with the parent.
   */
  static void reopen_all_pdb_backends(struct callback *UNUSED(cb),
                                      void *UNUSED(a),
                                      void *UNUSED(b))
  {
    int idx = 0;
    while (idx < num_pdb_backends) {
      pdb_REOPEN_POLL_DEVICE(pdb_backends[idx]);
      idx++;
    }
  }
 
  /* Apply the change from old_events to new_events (PIKE_BIT_FD_* masks)
   * for fd to this backend's fd set, and wake the backend up if new
   * events were added so that it re-polls with the updated set.
   *
   * flags carries extra filter flags (kqueue EVFILT_VNODE fflags); it is
   * unused in the poll-device case.
   */
  static void pdb_update_fd_set(struct Backend_struct *me,
                                struct PollDeviceBackend_struct *pdb, int fd,
                                int old_events, int new_events,
#ifdef BACKEND_USES_POLL_DEVICE
                                int PDUNUSED(flags)
#else
                                int flags
#endif
                                )
  {
    int changed_events = old_events ^ new_events;

    PDWERR("[%d]BACKEND[%d]: pdb_update_fd_set(.., %d, %d, %d, %d):\n",
           THR_NO, me->id, fd, old_events, new_events, flags);


    if (changed_events) {

#ifdef BACKEND_USES_POLL_DEVICE

      /* The poll device takes the full new event mask in one request. */
      pdb_UPDATE_BLACK_BOX(pdb, fd, new_events);

#elif defined(BACKEND_USES_KQUEUE)
      struct kevent ev[2];

      if (changed_events & (PIKE_BIT_FD_READ|PIKE_BIT_FD_READ_OOB)) {
        if (new_events & PIKE_BIT_FD_READ_OOB) {
          /* We want OOB even if there's no in-band data. */
          PDWERR("[%d]BACKEND[%d]: fd:%d READ, ADD, [OOBAND]\n",
                 THR_NO, me->id, fd);
          EV_SET(ev, fd, EVFILT_READ, EV_ADD|EV_OOBAND, 0, 0, 0);
          EV_SET(ev+1, fd, EVFILT_READ, EV_ENABLE|EV_OOBAND, 0, 0, 0);
          kevent(pdb->set, ev, 2, NULL, 0, NULL);
        } else if (new_events & PIKE_BIT_FD_READ) {
          PDWERR("[%d]BACKEND[%d]: fd:%d READ, ADD\n", THR_NO, me->id, fd);
          EV_SET(ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
          EV_SET(ev+1, fd, EVFILT_READ, EV_ENABLE, 0, 0, 0);
          kevent(pdb->set, ev, 2, NULL, 0, NULL);
        } else {
          PDWERR("[%d]BACKEND[%d]: fd:%d READ, DISABLE\n", THR_NO, me->id, fd);
          pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLIN);
        }
      }

      if (changed_events & (PIKE_BIT_FD_WRITE|PIKE_BIT_FD_WRITE_OOB)) {
        /* Outgoing OOB is multiplexed onto the write filter. */
        if (new_events & (PIKE_BIT_FD_WRITE|PIKE_BIT_FD_WRITE_OOB)) {
          PDWERR("[%d]BACKEND[%d]: fd:%d WRITE, ADD\n", THR_NO, me->id, fd);
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLOUT);
        } else {
          PDWERR("[%d]BACKEND[%d]: fd:%d WRITE, DISABLE\n",
                 THR_NO, me->id, fd);
          pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLOUT);
        }
      }

#else  /* !BACKEND_USES_POLL_DEVICE */
      if (changed_events & PIKE_BIT_FD_READ) {
        if (new_events & PIKE_BIT_FD_READ) {
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLIN);
          /* Got to enable the exception set to get errors (at least
           * according to POSIX). */
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLEXCEPT);
        }
        else {
          pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLIN);
          if (!(new_events & PIKE_BIT_FD_READ_OOB) &&
              !(new_events & PIKE_BIT_FD_WRITE))
            /* Exceptions might cause calls to read, read_oob and write. */
            pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
        }
      }

      if (changed_events & PIKE_BIT_FD_READ_OOB) {
        if (new_events & PIKE_BIT_FD_READ_OOB)
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLRDBAND);
        else {
          if (!(new_events & PIKE_BIT_FD_READ)) {
            if (!(new_events & PIKE_BIT_FD_WRITE))
              /* Exceptions might cause calls to read, read_oob and write. */
              pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
          } else {
            /* No-op unless POLLRDBAND is distinct from POLLIN/POLLRDNORM
             * on this platform (RDBAND_IS_SPECIAL).
             */
            pdb_MY_FD_CLR_RDBAND(&pdb->set, fd);
          }
        }
      }

      if (changed_events & PIKE_BIT_FD_WRITE) {
        if (new_events & PIKE_BIT_FD_WRITE) {
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLOUT);
          /* Got to enable the exception set to get errors (at least
           * according to POSIX). */
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLEXCEPT);
        }
        else {
          if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
            pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLOUT);
            if (!(new_events & PIKE_BIT_FD_READ) &&
                !(new_events & PIKE_BIT_FD_READ_OOB))
              /* Exceptions might cause calls to read, read_oob and write. */
              pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLEXCEPT);
          }
        }
      }

      if (changed_events & PIKE_BIT_FD_WRITE_OOB) {
        if (new_events & PIKE_BIT_FD_WRITE_OOB)
          pdb_MY_FD_SET(&pdb->set, fd, MY_POLLWRBAND);
        else {
          if (!(new_events & PIKE_BIT_FD_WRITE)) {
            pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLWREXCEPT);
          } else {
            /* No-op unless POLLWRBAND is distinct from POLLOUT/POLLWRNORM
             * on this platform (WRBAND_IS_SPECIAL).
             */
            pdb_MY_FD_CLR_WRBAND(&pdb->set, fd);
          }
        }
      }

// TODO kqueue ADD fflags
      if (changed_events & PIKE_BIT_FD_FS_EVENT) {
        if (new_events & PIKE_BIT_FD_FS_EVENT)
          pdb_MY_FD_SET2(&pdb->set, fd, MY_POLLFSEVENT, flags);
        else {
            pdb_MY_FD_CLR(&pdb->set, fd, MY_POLLFSEVENT);
        }
      }

#endif  /* !BACKEND_USES_POLL_DEVICE */

      if (new_events & ~old_events)
        /* New events were added. */
        backend_wake_up_backend(me);
    }
  }
 
  /* Mapping of events to flags and callbacks. 
   * 
   * Event      select  poll            kqueue                  callback 
   * 
   * data_in    read    POLLIN          EVFILT_READ             READ 
   *                    POLLRDNORM      EVFILT_READ[EOF] 
   * 
   * data_out   write   POLLOUT         EVFILT_WRITE            WRITE 
   *                    POLLWRNORM 
   * 
   * oob_in     except  POLLPRI         EVFILT_READ[EV_OOBAND]  READ_OOB 
   *                    POLLRDBAND 
   * 
   * oob_out    write   POLLWRBAND      (EVFILT_WRITE)          WRITE_OOB 
   * 
   * close_in   read    POLLIN          EVFILT_READ[EOF]        READ 
   *                    POLLHUP(Linux pipe) 
   * 
   * close_out  write   POLLHUP         EVFILT_WRITE[EOF]       WRITE 
   *                    POLLERR(Linux pipe)                     >WRITE_OOB 
   * 
   * conn_ok    write   POLLOUT         EVFILT_WRITE            WRITE 
   * 
   * conn_fail  read    POLLIN          EVFILT_READ[EOF]        READ 
   *            except                                          (READ_OOB) 
   * 
   * new_conn   read    POLLIN          EVFILT_READ             READ 
   * 
   * sock_err   except  POLLERR         EVFILT_READ[ERR]        ERROR 
   *                                                            >READ 
   * 
   * sock_err   except  POLLERR         EVFILT_WRITE[ERR]       ERROR 
   *                                                            >WRITE 
   */ 
 
#ifdef POLL_DEBUG
  /* Dump a human-readable description of a single poll/kqueue event to
   * stderr. Debug aid; only compiled in when POLL_DEBUG is defined.
   */
  static void pdb_describe_event(struct Backend_struct *me, POLL_EVENT event)
  {
#ifdef BACKEND_USES_KQUEUE
    fprintf(stderr, "[%d]BACKEND[%d]: fd:%d filter:%d flags:0x%08x",
            THR_NO, me->id, PDB_GET_FD(event), PDB_GET_EVENTS(event),
            event.flags);
    if (PDB_CHECK_EVENT(event, MY_POLLIN)) {
      fprintf(stderr, "  EVFILT_READ");
    } else if (PDB_CHECK_EVENT(event, MY_POLLOUT)) {
      fprintf(stderr, "  EVFILT_WRITE");
    } else if (PDB_CHECK_EVENT(event, MY_POLLFSEVENT)) {
      fprintf(stderr, "  POLLFSEVENT");
    } else {
      fprintf(stderr, "  UNKNOWN");
    }
    if (event.flags & EV_OOBAND) {
      fprintf(stderr, "[OOBAND]");
    }
    if (event.flags & EV_ERROR) {
      fprintf(stderr, "[ERROR]");
    }
    if (event.flags & EV_EOF) {
      fprintf(stderr, "[EOF]");
    }
    /* struct kevent's data field is an intptr_t; passing it to %d is a
     * format-specifier mismatch (undefined behavior on LP64 systems).
     * Cast to long and print with %ld instead. */
    fprintf(stderr, "(%ld)\n", (long)event.data);
#else /* !BACKEND_USES_KQUEUE */
    fprintf(stderr, "[%d]BACKEND[%d]: fd:%d events:0x%04x",
            THR_NO, me->id, PDB_GET_FD(event), PDB_GET_EVENTS(event));
    if (PDB_CHECK_EVENT(event, MY_POLLNVAL)) {
      fprintf(stderr, "  POLLNVAL");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLERR)) {
      fprintf(stderr, "  POLLERR");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLHUP)) {
      fprintf(stderr, "  POLLHUP");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLRDBAND)) {
      fprintf(stderr, "  POLLRDBAND");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLIN)) {
      fprintf(stderr, "  POLLIN");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLWRBAND)) {
      fprintf(stderr, "  POLLWRBAND");
    }
    if (PDB_CHECK_EVENT(event, MY_POLLOUT)) {
      fprintf(stderr, "  POLLOUT");
    }
    fprintf(stderr, "\n");
#endif /* BACKEND_USES_KQUEUE */
  }
#else /* !POLL_DEBUG */
#define pdb_describe_event(BACKEND, EVENT)
#endif /* POLL_DEBUG */
 
  /* Run one round of the poll-device backend: wait for fd events (or a
   * timeout), translate received events into PIKE_BIT_FD_* bits on the
   * corresponding callback boxes, call the fd callbacks and then any
   * due call outs.
   *
   * A negative tv_sec in timeout turns it off. If it ran until the
   * timeout without calling any callbacks or call outs (except those
   * on backend_callbacks) then tv_sec will be set to -1. Otherwise it
   * will be set to the time spent. */
  static void pdb_low_backend_once(struct PollDeviceBackend_struct *pdb,
                                   struct timeval *timeout)
  {
    ONERROR uwp;
    int i, done_something = 0;
    struct timeval start_time = *timeout;
    struct Backend_struct *me = pdb->backend;

#ifdef DECLARE_POLL_EXTRAS
    /* Declare any extra variables needed by MY_POLL(). */
    DECLARE_POLL_EXTRAS;
#endif /* DECLARE_POLL_EXTRAS */

    /* If the setup phase already did work (eg due call outs), this
     * round is done before we even poll. */
    if ((done_something = low_backend_once_setup(pdb->backend, &start_time))) {
      goto low_backend_round_done;
    }
    SET_ONERROR(uwp, low_backend_cleanup, me);

    if (TYPEOF(me->before_callback) != T_INT)
      call_backend_monitor_cb (me, &me->before_callback);

    {
#ifdef BACKEND_USES_CFRUNLOOP
          double cf_timeout;
#endif /* BACKEND_USES_CFRUNLOOP */

      /* The poll flavor determines the native timeout representation;
       * convert from next_timeout (a struct timeval) below. */
#ifdef TIMEOUT_IS_MILLISECONDS
      int poll_timeout;
#elif defined(TIMEOUT_IS_TIMEVAL)
      struct timeval poll_timeout;
#elif defined(TIMEOUT_IS_TIMESPEC)
      struct timespec poll_timeout;
#else
#error Unknown timeout method.
#endif /* TIMEOUT_IS_* */
      struct timeval *next_timeout = &pdb->backend->next_timeout;

      /* From here until the poll returns, other threads may need to
       * wake us (via the wakeup mechanism) to interrupt the wait. */
      me->may_need_wakeup = 1;

#ifdef TIMEOUT_IS_MILLISECONDS
      if (next_timeout->tv_sec >= 100000000)
        /* Take this as waiting forever. */
        poll_timeout = -1;
      else if(next_timeout->tv_sec < 0)
        poll_timeout = 0;
      else if(next_timeout->tv_sec > (INT_MAX/1002)) /* about 24 days.*/
        poll_timeout = INT_MAX/1002;
      else
        poll_timeout = MAXIMUM((next_timeout->tv_sec*1000) +
                               next_timeout->tv_usec/1000,2);
#elif defined(TIMEOUT_IS_TIMEVAL)
      poll_timeout = *next_timeout;
#elif defined(TIMEOUT_IS_TIMESPEC)
      poll_timeout.tv_sec = next_timeout->tv_sec;
      poll_timeout.tv_nsec = next_timeout->tv_usec*1000;
#else
#error Unknown timeout method.
#endif /* TIMEOUT_IS_* */

      PDWERR("[%d]BACKEND[%d]: Doing poll on fds:\n", THR_NO, me->id);

      check_threads_etc();
      THREADS_ALLOW();

      /* Note: The arguments to MY_POLL may be evaluated multiple times. */

#ifdef BACKEND_USES_CFRUNLOOP
    if(pdb->go_cf)
    {
      /* CoreFoundation mode: let the runloop do the waiting; the fd
       * callbacks fill in poll_fds and bump event_count. */
      cf_timeout = next_timeout->tv_sec + (next_timeout->tv_usec / 1000000.0);

      pdb->event_count = 0;
      pdb->poll_fds = (poll_fds);

      CFFileDescriptorEnableCallBacks(pdb->fdref,
                                      kCFFileDescriptorReadCallBack);
      CFRunLoopAddSource(CFRunLoopGetCurrent(), pdb->source,
                         kCFRunLoopDefaultMode);
      CFRunLoopRunInMode(kCFRunLoopDefaultMode, cf_timeout, true);

      i = pdb->event_count;
      pdb->poll_fds = NULL;
}
else
#endif /* BACKEND_USES_CFRUNLOOP */
      i = PDB_POLL(pdb->set, poll_timeout);

      PDWERR(" => %d (timeout was: %d)\n", i, poll_timeout);

      THREADS_DISALLOW();
      check_threads_etc();
      me->may_need_wakeup = 0;
      INVALIDATE_CURRENT_TIME();
    }

    if (TYPEOF(me->after_callback) != T_INT)
      call_backend_monitor_cb (me, &me->after_callback);

    if (!i) {
      /* Timeout */
    } else if (i>0) {
      /* i > 0: that many events were received. Collect the affected
       * boxes on a temporary list headed (circularly) by fd_list. */
      int num_active = i;
      struct fd_callback_box fd_list = {
        me, NULL, &fd_list,
        -1, 0, 0,
        0, 0, NULL
      };
      struct fd_callback_box *box;
      ONERROR free_fd_list;

      SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list);

      done_something = 1;


#if 0
      /* First clear revents for all the fds.
       *
       * FIXME: This is done for paranoia reasons. If all code that
       *        messes with fds clears revents, this isn't needed.
       *
       * Note: This needs to be a separate loop, since kqueue sends
       *       read and write in two separate events.
       */
      while(i--)
      {
        int fd = PDB_GET_FD(poll_fds[i]);

#ifdef BACKEND_USES_KQUEUE
      if(poll_fds[i].filter == MY_POLLSIGNAL)
      {
        continue;
      }
#endif /* BACKEND_USES_KQUEUE */

        box = SAFE_GET_ACTIVE_BOX (me, fd);
        if (box) {
          check_box (box, fd);
          box->revents = 0;
          box->rflags = 0;
        }
      }
#endif

      /* Then flag the active events.
       */
      i = num_active;
      while(i--)
      {
        int fd = PDB_GET_FD(poll_fds[i]);

        pdb_describe_event(me, poll_fds[i]);

        if (!(box = SAFE_GET_ACTIVE_BOX (me, fd))) {
          /* The box is no longer active. */
          continue;
        }

#ifdef MY_POLLNVAL
        if(PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLNVAL))
        {
          struct pollfd fds;
          int ret;
          /* NOTE: /dev/poll returns POLLNVAL for closed descriptors. */
          PDWERR("[%d]BACKEND[%d]: POLLNVAL on %d\n", THR_NO, me->id, fd);
#ifdef PIKE_DEBUG
#ifdef HAVE_POLL
          /* FIXME */

          /* Re-probe the fd with a plain poll() to distinguish a truly
           * bad descriptor (fatal) from a spurious POLLNVAL. */
          fds.fd=fd;
          fds.events=POLLIN;
          fds.revents=0;
          ret=poll(&fds, 1,1 );
          if(fds.revents & POLLNVAL)
            Pike_fatal("Bad filedescriptor %d to poll().\n", fd);
#endif
          /* Don't do anything further with this fd. */
          continue;
#endif /* PIKE_DEBUG */
        }
#endif /* MY_POLLNVAL */

        check_box (box, fd);

        {
#ifdef PIKE_DEBUG
          int handled = 0;
#endif /* PIKE_DEBUG */
          /* Translate the received poll/kqueue events to
           * PIKE_BIT_FD_* bits in box->revents. */
          if (PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLERR)) {
            /* Errors are signalled on the first available callback. */
            PDWERR("[%d]BACKEND[%d]: POLLERR on %d\n", THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_ERROR;
            /* Note that Linux pipe's signal close in the write-direction
             * with POLLERR.
             *
             * FIXME: Signal on write-direction?
             */
#ifdef BACKEND_USES_KQUEUE
            /* kqueue signals errors as read or write events but
             * with an additional error flag, so we must take care
             * to not set any read/write bits if it's a sole error
             * event. */
            if (!poll_fds[i].data) goto next_fd;
#endif
          }
          if (PDB_CHECK_ERROR_EVENT(poll_fds[i], MY_POLLHUP)) {
            PDWERR("[%d]BACKEND[%d]: POLLHUP on %d\n", THR_NO, me->id, fd);
            /* Linux signals close in the read-direction of pipes
             * and fifos with POLLHUP. */
            box->revents |= PIKE_BIT_FD_READ|PIKE_BIT_FD_READ_OOB;
            /* For historical reasons we also signal on the write-direction. */
            box->revents |= PIKE_BIT_FD_WRITE|PIKE_BIT_FD_WRITE_OOB;
            DO_IF_DEBUG(handled = 1);
          }
          if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLRDBAND)
#ifdef BACKEND_USES_KQUEUE
              && (!EV_OOBAND || PDB_CHECK_ERROR_EVENT(poll_fds[i], EV_OOBAND))
#endif
              ) {
            PDWERR("[%d]BACKEND[%d]: POLLRDBAND on %d\n", THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_READ_OOB;
            DO_IF_DEBUG(handled = 1);
          }
          if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLIN)) {
            PDWERR("[%d]BACKEND[%d]: POLLRDNORM|POLLIN on %d\n",
                   THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_READ;
            DO_IF_DEBUG(handled = 1);
          }
          if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLWRBAND)) {
            PDWERR("[%d]BACKEND[%d]: POLLWRBAND on %d\n",
                   THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_WRITE_OOB;
            DO_IF_DEBUG(handled = 1);
          }
          if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLOUT)) {
            PDWERR("[%d]BACKEND[%d]: POLLOUT on %d\n", THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_WRITE;
            DO_IF_DEBUG(handled = 1);
          }
          if (PDB_CHECK_EVENT(poll_fds[i], MY_POLLFSEVENT)) {
            PDWERR("[%d]BACKEND[%d]: POLLFSEVENT on %d\n", THR_NO, me->id, fd);
            box->revents |= PIKE_BIT_FD_FS_EVENT;
            DO_IF_DEBUG(handled = 1);
          }
#ifdef PIKE_DEBUG
          if (!handled && PDB_GET_EVENTS(poll_fds[i])) {
            fprintf(stderr, "[%d]BACKEND[%d]: fd %ld has revents 0x%08lx, "
                    "but hasn't been handled.\n", THR_NO, me->id,
                    (long)PDB_GET_FD(poll_fds[i]),
                    (long)PDB_GET_EVENTS(poll_fds[i]));
            pdb_describe_event(me, poll_fds[i]);
          }
#endif /* PIKE_DEBUG */
        }
        if (box->revents) {
          if (!(box->revents & (box->events | PIKE_BIT_FD_ERROR))) {
            /* Robustness paranoia; we've only received events that we
             * aren't interested in. Unregister the unwanted events
             * in case we are out of sync with the poll device.
             * Otherwise we risk entering a busy loop.
             */
            PDWERR("[%d]BACKEND[%d]: Backend is out of sync for fd %d\n"
                   "[%d]BACKEND[%d]: Wanted: 0x%04x Received: 0x%04x\n",
                   THR_NO, me->id, fd,
                   THR_NO, me->id, box->events, box->revents);
            pdb_update_fd_set(me, pdb, fd, box->revents|box->events,
                              box->events, box->flags);
          }
        next_fd:
          /* Hook in the box on the fd_list. */
          if (!box->next) {
            PDWERR("[%d]BACKEND[%d]: hooking in box for fd %d\n",
                   THR_NO, me->id, fd);
            box->next = fd_list.next;
            fd_list.next = box;
            /* Keep the owning object alive while it is on the list. */
            if (box->ref_obj) add_ref(box->ref_obj);
          } else {
            PDWERR("[%d]BACKEND[%d]: fd %d already in list.\n",
                   THR_NO, me->id, fd);
          }
        }
      }

      /* Common code for all variants.
       *
       * Call callbacks for the active events.
       */
      if (backend_call_active_callbacks(&fd_list, me)) {
        CALL_AND_UNSET_ONERROR(free_fd_list);
        goto backend_round_done;
      }

      CALL_AND_UNSET_ONERROR(free_fd_list);

      /* Must be up-to-date for backend_do_call_outs. */
      INVALIDATE_CURRENT_TIME();
    }else{
      /* i < 0: the poll itself failed; decide per errno. */
      switch(errno)
      {
#ifdef __NT__
      default:
        Pike_fatal("Error in backend %d\n",errno);
        break;
#endif

      case EINVAL:
        Pike_fatal("Invalid timeout to select().\n");
        break;

#ifdef WSAEINTR
      case WSAEINTR:
#endif
      case EINTR:               /* ignore */
        break;

#ifdef WSAEBADF
      case WSAEBADF:
#endif
#ifdef ENOTSOCK
      case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
      case WSAENOTSOCK:
#endif
      case EBADF:
        /* TODO: Fix poll version! */
        break;

      }
    }

    {
      int call_outs_called =
        backend_do_call_outs(me); /* Will update current_time after calls. */
      if (call_outs_called)
        done_something = 1;
      if (call_outs_called < 0)
        goto backend_round_done;
    }

    call_callback(&me->backend_callbacks, NULL);

  backend_round_done:

#ifdef PIKE_THREADS
    /* Tell threads waiting in backend_wait_* that a round finished. */
    me->done_counter += done_something;

    co_broadcast(&me->backend_signal);
#endif

    CALL_AND_UNSET_ONERROR (uwp);

  low_backend_round_done:
    /* Report back through timeout: -1 for "did nothing", otherwise
     * the wall-clock time spent in this round. */
    if (done_something <= 0)
      timeout->tv_sec = -1;
    else {
      struct timeval now;
      INACCURATE_GETTIMEOFDAY(&now);
      timeout->tv_sec = now.tv_sec;
      timeout->tv_usec = now.tv_usec;
      my_subtract_timeval (timeout, &start_time);
    }
  }
 
#ifdef BACKEND_USES_CFRUNLOOP 
  /* Timer fired: run a single zero-timeout pass of the backend that
   * was stashed in the timer's info pointer. */
  void cfTimerCallback(CFRunLoopTimerRef UNUSED(timer), void * info)
  {
    struct PollDeviceBackend_struct *pdb =
      (struct PollDeviceBackend_struct *)info;
    struct timeval zero_timeout;

    zero_timeout.tv_sec = 0;
    zero_timeout.tv_usec = 0;

    pdb_low_backend_once(pdb, &zero_timeout);
  }
 
  /* our external CFRunLoop has received events or timed out. We should 
     do a once-through the runloop to find out if there's anything to do. */ 
  void cfObserverCallback(CFRunLoopObserverRef UNUSED(observer), 
      CFRunLoopActivity activity, void* info) 
  { 
    struct timeval timeout; 
    struct thread_state *cur_ts__; 
    struct PollDeviceBackend_struct *me; 
    me = (struct PollDeviceBackend_struct *)info; 
 
    switch(activity) 
    { 
      case kCFRunLoopEntry: 
        /* release the interpreter lock. */ 
        me->gil_released = 1; 
        { 
          cur_ts__ = Pike_interpreter.thread_state; 
          me->thread_state = cur_ts__; /* for use later. */ 
          pike_threads_allow (cur_ts__ COMMA_DLOC); 
        } 
        break; 
 
      case kCFRunLoopBeforeTimers: 
      case kCFRunLoopBeforeSources: 
        /* gain the interpreter lock, if released. */ 
        if(me->gil_released) 
        { 
          cur_ts__ = me->thread_state; 
          me->gil_released = 0; 
          pike_threads_disallow (cur_ts__ COMMA_DLOC); 
          me->thread_state = 0; 
        } 
        break; 
 
      case kCFRunLoopExit: 
        if(me->gil_released) 
        { 
          cur_ts__ = me->thread_state; 
          me->gil_released = 0; 
          pike_threads_disallow (cur_ts__ COMMA_DLOC); 
          me->thread_state = 0; 
        } 
        timeout.tv_sec = 0; 
        timeout.tv_usec = 0; 
        pdb_low_backend_once(me, &timeout); 
        break; 
    } 
 
  } 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time) 
   *!   Perform one pass through the backend. 
   *! 
   *!   Calls any outstanding call-outs and non-blocking I/O 
   *!   callbacks that are registered in this backend object.
   *! 
   *! @param sleep_time 
   *!   Wait at most @[sleep_time] seconds. The default when 
   *!   unspecified or the integer @expr{0@} is no time limit. 
   *! 
   *! @returns 
   *!   If the backend did call any callbacks or call outs then the 
   *!   time spent in the backend is returned as a float. Otherwise 
   *!   the integer @expr{0@} is returned. 
   *! 
   *! @seealso 
   *!   @[Pike.DefaultBackend], @[main()] 
   */ 
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout;     /* Got bogus gcc warning on timeout.tv_usec. */

    timeout.tv_sec = 0;
    timeout.tv_usec = 0;

    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      /* Split the float seconds into whole seconds and microseconds. */
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec =
        (long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6);
    }
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT &&
             sleep_time->u.integer) {
      /* Only the integer 0 is accepted; any other int is a type error. */
      SIMPLE_ARG_TYPE_ERROR("`()", 1, "float|int(0..0)");
    }
    else
      /* Unspecified or 0: negative tv_sec means no time limit. */
      timeout.tv_sec = -1;

    pdb_low_backend_once(THIS, &timeout);

    pop_n_elems (args);
    /* pdb_low_backend_once() sets tv_sec to -1 when no callbacks or
     * call outs ran; otherwise timeout holds the time spent. */
    if (timeout.tv_sec < 0)
      push_int (0);
    else
      push_float((FLOAT_TYPE)
                 ((double)timeout.tv_sec + (double)timeout.tv_usec / 1e6));
  }
 
#ifdef BACKEND_USES_CFRUNLOOP 
/*! @decl int enable_core_foundation(int(0..1) enable) 
 *!   On systems with CoreFoundation (OSX, iOS, etc), use CoreFoundation 
 *!   to poll for events. This enables system level technologies that rely 
 *!   on CoreFoundation Runloops to function properly. 
 *! 
 *!  @param enable 
 *!    enable or disable this functionality 
 *! 
 *!  @returns 
 *!    the previous value of this setting. 
 *! 
 */ 
PIKEFUN int enable_core_foundation(int enable) 
{ 
  int x = THIS->go_cf; 
 
  if(enable && !THIS->go_cf) 
  { 
    THIS->go_cf = 1; 
    init_cf(THIS, THIS->set); 
  } 
  else if(!enable && THIS->go_cf) 
  { 
    THIS->go_cf = 0; 
    if(THIS->external_run) 
      exit_external_cfrl(THIS); 
    exit_cf(THIS); 
  } 
 
  RETURN x; 
} 
 
/*! @decl int query_core_foundation_enabled() 
 *! 
 *! On systems with CoreFoundation (OSX, iOS, etc), indicate whether 
 *! CoreFoundation is being used by this backend to poll for events. 
 *! 
 *! @returns 
 *! the current state of CoreFoundation polling: 1=enabled, 0=disabled 
 *! 
 */ 
PIKEFUN int query_core_foundation_enabled() 
{ 
  RETURN THIS->go_cf; 
} 
 
/*! @decl int enable_external_runloop(int(0..1) enable) 
 *!   On systems with CoreFoundation (OSX, iOS, etc), delegate 
 *!   running of the Pike Backend to the main runloop of the 
 *!   process (such as a Cocoa application's NSRunLoop). 
 *! 
 *!   Enabling the external runloop allows Pike callouts and 
 *!   callback-based I/O to function normally while greatly reducing 
 *!   cpu utilization compared to running the external runloop 
 *!   manually. 
 *! 
 *!  @param enable 
 *!    enable or disable this functionality 
 *! 
 *!  @returns 
 *!    the previous value of this setting. 
 *! 
 */ 
PIKEFUN int enable_external_runloop(int enable) 
{ 
  int x = THIS->external_run; 
 
  if(enable && !THIS->external_run) 
  { 
    THIS->external_run = 1; 
 
    if(!THIS->go_cf) 
      init_cf(THIS, THIS->set); 
 
    init_external_cfrl(THIS, THIS->set); 
  } 
  else if(!enable && THIS->external_run) 
  { 
    THIS->external_run = 0; 
    exit_external_cfrl(THIS); 
  } 
 
  RETURN x; 
} 
 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
 
  EXTRA
  {
    /* Compute the offset from this program's storage to the inherited
     * Backend storage (inherit 1); INIT below uses it to locate the
     * Backend_struct. */
    pdb_offset = Pike_compiler->new_program->inherits[1].storage_offset -
      Pike_compiler->new_program->inherits[0].storage_offset;

    /* /dev/poll and kqueue fds get invalidated at fork. */
    dmalloc_accept_leak(add_to_callback(&fork_child_callback,
                                        reopen_all_pdb_backends, NULL, NULL));

    /* Advertise compiled-in capabilities to Pike code. */
#ifdef BACKEND_USES_CFRUNLOOP
    add_integer_constant("HAVE_CORE_FOUNDATION", 1, 0);
#endif /* BACKEND_USES_CFRUNLOOP */

#ifdef BACKEND_USES_KQUEUE
    add_integer_constant("HAVE_KQUEUE", 1, 0);
#endif /* BACKEND_USES_KQUEUE */

  }
 
  INIT
  {
    /* Locate the inherited Backend storage via the offset computed in
     * EXTRA and remember it in THIS->backend. */
    struct Backend_struct *me =
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + pdb_offset);

    PDWERR("[%d]BACKEND[%d]: Registering device backend...\n",
           THR_NO, me->id);

    /* Install ourselves as the backend's fd set handler. */
    me->update_fd_set_handler = (update_fd_set_handler_fn *) pdb_update_fd_set;
    me->handler_data = THIS;

    PDWERR("[%d]BACKEND[%d]: Registering backend...\n", THR_NO, me->id);
    register_pdb_backend(THIS);

    PDWERR("[%d]BACKEND[%d]: Opening poll device...\n", THR_NO, me->id);
    if ((THIS->set = OPEN_POLL_DEVICE(THIS)) < 0) {
      Pike_error("Failed to open poll device (errno:%d)\n", errno);
    }
    /* Don't leak the poll device fd into exec()ed children. */
    set_close_on_exec(THIS->set, 1);
  }
 
  EXIT
    gc_trivial;
  {
    struct Backend_struct *me = THIS->backend;

    PDWERR("[%d]BACKEND[%d]: Closing poll device...\n", THR_NO, me->id);

    /* Tear down any CoreFoundation runloop integration first. */
#ifdef BACKEND_USES_CFRUNLOOP
    if(THIS->external_run)
      exit_external_cfrl(THIS);
    exit_cf(THIS);
#endif /* BACKEND_USES_CFRUNLOOP */

    /* set may be negative if OPEN_POLL_DEVICE() failed in INIT. */
    if (THIS->set >= 0)
      fd_close(THIS->set);

    unregister_pdb_backend(THIS);
  }
} 
 
/*! @endclass 
 */ 
 
#endif /* BACKEND_USES_POLL_DEVICE || BACKEND_USES_KQUEUE */ 
 
#ifdef BACKEND_USES_CFRUNLOOP 
static void check_set_timer(struct timeval tmp) 
{ 
  { 
    /* register a timer if the backend is using external cfrl. */ 
    struct external_variable_context loc; 
    struct PollDeviceBackend_struct * pdb = NULL; 
    char * _st = NULL; 
    loc.o = Pike_fp->current_object; 
    loc.parent_identifier = 0; 
    loc.inherit = Pike_fp->context; 
 
    find_external_context(&loc, 1); 
 
    if (!loc.o->prog) 
    { 
      Pike_error ("Cannot access storage of destructed parent object.\n"); 
    } 
 
    _st = get_storage(loc.o, PollDeviceBackend_program); 
 
    pdb = (struct PollDeviceBackend_struct *)_st; 
 
    if(pdb && pdb->external_run) 
    { 
      CFRunLoopTimerContext context = {0, pdb, NULL, NULL, NULL}; 
      CFRunLoopTimerRef timer; 
      CFTimeInterval when = 0.0; 
 
      when = (CFTimeInterval)tmp.tv_sec + 
        (1.0E-6 * (CFTimeInterval) tmp.tv_usec); 
      timer = CFRunLoopTimerCreate(NULL, CFAbsoluteTimeGetCurrent() + when, 0, 
                                   0, 0, &cfTimerCallback, &context); 
 
      if(timer) 
      { 
        /* set the timer. */ 
        CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopDefaultMode); 
      } 
      else 
      { 
        Pike_error("Unable to create run loop timer!\n"); 
      } 
    } 
  } 
} 
#endif /* BACKEND_USES_CFRUNLOOP */ 
 
#ifdef HAVE_POLL 
 
/*! @class PollBackend 
 *! @inherit __Backend 
 *! 
 *! @[Backend] implemented with @tt{poll(2)@} (SVr4, POSIX). 
 *! 
 *! @seealso 
 *!   @[Backend] 
 */ 
PIKECLASS PollBackend 
{ 
  INHERIT Backend; 
 
  /* Helpers to find the above inherit. */ 
  static ptrdiff_t pb_offset = 0; 
  CVAR struct Backend_struct *backend; 
 
  /* 
   * POLL/SELECT fd sets 
   */ 
  CVAR struct pb_selectors set; 
  CVAR struct pb_selectors active_set; 
 
  DECLARE_STORAGE 
 
  /* 
   * FD set handling 
   */ 
 
  /* Synchronize the poll() fd set with a change of wanted
   * PIKE_BIT_FD_* events for fd, and wake the backend thread when new
   * events were added so an ongoing poll() picks them up.
   *
   * old_events/new_events: previous and wanted PIKE_BIT_FD_* masks.
   */
  static void pb_update_fd_set (struct Backend_struct *me,
                                struct PollBackend_struct *pb, int fd,
                                int old_events, int new_events,
                                int UNUSED(flags))
  {
    int changed_events = old_events ^ new_events;

    PDWERR("[%d]BACKEND[%d]: pb_update_fd_set(.., %d, %d, %d):\n",
           THR_NO, me->id, fd, old_events, new_events);


    if (changed_events) {

      if (changed_events & PIKE_BIT_FD_READ) {
        if (new_events & PIKE_BIT_FD_READ) {
          pb_MY_FD_SET(&pb->set, fd, MY_POLLIN);
        }
        else {
          pb_MY_FD_CLR(&pb->set, fd, MY_POLLIN);
          /* Keep OOB read registered if it's still wanted. */
          if (new_events & PIKE_BIT_FD_READ_OOB)
          {
            pb_MY_FD_SET(&pb->set, fd, MY_POLLRDBAND);
          }
        }
      }

      if (changed_events & PIKE_BIT_FD_READ_OOB) {
        if (new_events & PIKE_BIT_FD_READ_OOB)
          pb_MY_FD_SET(&pb->set, fd, MY_POLLRDBAND);
        else {
          if (!(new_events & PIKE_BIT_FD_READ)) {
            pb_MY_FD_CLR(&pb->set, fd, MY_POLLRDBAND);
          } else {
            /* Ordinary read still wanted; only drop the band bit. */
            pb_MY_FD_CLR_RDBAND(&pb->set, fd);
          }
        }
      }

      if (changed_events & PIKE_BIT_FD_WRITE) {
        if (new_events & PIKE_BIT_FD_WRITE) {
          pb_MY_FD_SET(&pb->set, fd, MY_POLLOUT);
        }
        else {
          if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
            pb_MY_FD_CLR(&pb->set, fd, MY_POLLOUT);
          }
        }
      }

      if (changed_events & PIKE_BIT_FD_WRITE_OOB) {
        if (new_events & PIKE_BIT_FD_WRITE_OOB)
          pb_MY_FD_SET(&pb->set, fd, MY_POLLWRBAND);
        else {
          if (!(new_events & PIKE_BIT_FD_WRITE)) {
            /* Clear the same flag that was set above (MY_POLLWRBAND);
             * clearing a different flag here would leave POLLWRBAND
             * permanently stuck in the poll set for this fd, mirroring
             * the symmetric READ_OOB branch above. */
            pb_MY_FD_CLR(&pb->set, fd, MY_POLLWRBAND);
          } else {
            pb_MY_FD_CLR_WRBAND(&pb->set, fd);
          }
        }
      }

      if (new_events & ~old_events)
        /* New events were added. */
        backend_wake_up_backend (me);
    }
  }
 
#ifdef PIKE_DEBUG 
 
  /* Sanity-check the poll set: every fd in it must be mapped to this
   * backend in fd_map and must fstat() successfully. Fatals on any
   * inconsistency. Only compiled when PIKE_DEBUG is defined. */
  static void pb_backend_do_debug(struct Backend_struct *me,
                                  struct PollBackend_struct *pb)
    {
      int e;

      /* FIXME: OOB? */
      for(e=0;e<pb->set.num_in_poll;e++)
      {
        PIKE_STAT_T tmp;
        int ret;
        int fd = pb->set.poll_fds[e].fd;

        if (fd >= fd_map_size || fd_map[fd] != me)
          Pike_fatal("Unmapped fd %d at %d in poll set.\n", fd, e);

        /* Restart fstat() if interrupted by a signal. */
        do {
          ret=fd_fstat(fd, &tmp);
          /* FIXME: Perhaps do check_threads_etc() here? */
        }while(ret < 0 && errno == EINTR);

        if(ret<0)
        {
          switch(errno)
          {
            case EBADF:
              Pike_fatal("Backend filedescriptor %d is bad.\n", fd);
              break;
            case ENOENT:
              Pike_fatal("Backend filedescriptor %d is not.\n", fd);
              break;
          }
        }
      }
    }
 
#endif  /* PIKE_DEBUG */ 
 
  /* Mapping of events to flags and callbacks. 
   * 
   * Event      select  poll            kqueue                  callback 
   * 
   * data_in    read    POLLIN          EVFILT_READ             READ 
   *                    POLLRDNORM      EVFILT_READ[EOF] 
   * 
   * data_out   write   POLLOUT         EVFILT_WRITE            WRITE 
   *                    POLLWRNORM 
   * 
   * oob_in     except  POLLPRI         (EVFILT_READ)           READ_OOB 
   *                    POLLRDBAND 
   * 
   * oob_out    write   POLLWRBAND      (EVFILT_WRITE)          WRITE_OOB 
   * 
   * close_in   read    POLLIN          EVFILT_READ[EOF]        READ 
   * 
   * close_out  write   POLLHUP         EVFILT_WRITE[EOF]       WRITE 
   *                                                            >WRITE_OOB 
   * 
   * conn_ok    write   POLLOUT         EVFILT_WRITE            WRITE 
   * 
   * conn_fail  read    POLLIN          EVFILT_READ[EOF]        READ 
   *            except                                          (READ_OOB) 
   * 
   * new_conn   read    POLLIN          EVFILT_READ             READ 
   * 
   * sock_err   except  POLLERR         EVFILT_READ[ERR]        ERROR 
   *                                                            >READ 
   * 
   * sock_err   except  POLLERR         EVFILT_WRITE[ERR]       ERROR 
   *                                                            >WRITE 
   * fs_event   NONE    NONE            EVFILT_VNODE            FSEVENT
   */ 
 
 
  /* A negative tv_sec in timeout turns it off. If it ran until the 
   * timeout without calling any callbacks or call outs (except those 
   * on backend_callbacks) then tv_sec will be set to -1. Otherwise it 
   * will be set to the time spent. */ 
  static void pb_low_backend_once(struct PollBackend_struct *pb, 
                                  struct timeval *timeout) 
  { 
    ONERROR uwp; 
    int i, done_something = 0; 
    struct timeval start_time = *timeout; 
    struct Backend_struct *me = pb->backend; 
#ifdef DECLARE_POLL_EXTRAS 
    /* Declare any extra variables needed by MY_POLL(). */ 
    DECLARE_POLL_EXTRAS; 
#endif /* DECLARE_POLL_EXTRAS */ 
 
    if ((done_something = low_backend_once_setup(pb->backend, &start_time))) { 
      goto low_backend_round_done; 
    } 
    SET_ONERROR(uwp, low_backend_cleanup, THIS->backend); 
 
    if (TYPEOF(me->before_callback) != T_INT) 
      call_backend_monitor_cb (me, &me->before_callback); 
 
    { 
      int poll_timeout; 
      struct timeval *next_timeout = &pb->backend->next_timeout; 
 
      me->may_need_wakeup = 1; 
 
      if (next_timeout->tv_sec >= 100000000) 
        /* Take this as waiting forever. */ 
        poll_timeout = -1; 
      else if(next_timeout->tv_sec < 0) 
        poll_timeout = 0; 
      else if(next_timeout->tv_sec > (INT_MAX/1002)) 
        poll_timeout = INT_MAX; 
      else 
        poll_timeout = (next_timeout->tv_sec*1000) + 
          next_timeout->tv_usec/1000; 
 
      pb_copy_selectors(& pb->active_set, &pb->set); 
 
      PDWERR("[%d]BACKEND[%d]: Doing poll on fds:\n", THR_NO, me->id); 
#ifdef POLL_DEBUG 
      { 
        int i; 
        for (i = 0; i < pb->active_set.num_in_poll; i++) { 
          fprintf (stderr, 
                   "[%d]BACKEND[%d]:   fd %4d: %-4s %-5s %-8s %-9s: 0x%04x\n", 
                   THR_NO, me->id, 
                   pb->active_set.poll_fds[i].fd, 
                   pb->active_set.poll_fds[i].events & (POLLRDNORM|POLLIN) ? 
                   "read" : "", 
                   pb->active_set.poll_fds[i].events & POLLOUT ? 
                   "write" : "", 
                   pb->active_set.poll_fds[i].events & POLLRDBAND ? 
                   "read_oob" : "", 
                   pb->active_set.poll_fds[i].events & POLLWRBAND ? 
                   "write_oob" : "", 
                   pb->active_set.poll_fds[i].events); 
        } 
      } 
      fprintf(stderr, "[%d]BACKEND[%d]: poll(%p, %d, %d)...", THR_NO, me->id, 
              pb->active_set.poll_fds, 
              pb->active_set.num_in_poll, 
              poll_timeout); 
#endif /* POLL_DEBUG */ 
 
      check_threads_etc(); 
      THREADS_ALLOW(); 
 
      /* Note: The arguments to MY_POLL may be evaluated multiple times. */ 
      i = PB_POLL(pb->active_set, poll_timeout); 
 
      PDWERR(" => %d\n", i); 
 
      THREADS_DISALLOW(); 
      check_threads_etc(); 
      me->may_need_wakeup = 0; 
      INVALIDATE_CURRENT_TIME(); 
    } 
 
    if (TYPEOF(me->after_callback) != T_INT) 
      call_backend_monitor_cb (me, &me->after_callback); 
 
    if (!i) { 
      /* Timeout */ 
    } else if (i>0) { 
      int num_active = i; 
      struct fd_callback_box fd_list = { 
        NULL, NULL, NULL, 
        -1, 0, 0, 
        0, 0, NULL, 
      }; 
      struct fd_callback_box *box; 
      ONERROR free_fd_list; 
 
      fd_list.backend = me; 
      fd_list.next = &fd_list; 
 
      SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list); 
 
      done_something = 1; 
 
 
      /* First clear revents for all the fds. 
       * 
       * FIXME: This is done for paranoia reasons. If all code that 
       *        messes with fds clears revents, this isn't needed. 
       * 
       * Note: this needs to be a separate loop, since kqueue sends 
       *       read and write in two separate events. 
       */ 
      for(i=0; i<pb->active_set.num_in_poll; i++) 
      { 
        int fd = PB_GET_FD(pb->active_set.poll_fds[i]); 
        box = SAFE_GET_ACTIVE_BOX (me, fd); 
        if (box) { 
          check_box (box, fd); 
          box->revents = 0; 
          box->flags = 0; 
        } 
      } 
 
      /* Then flag the active events. 
       */ 
      for(i=0; i<pb->active_set.num_in_poll; i++) 
      { 
        int fd = PB_GET_FD(pb->active_set.poll_fds[i]); 
 
        if (!(box = SAFE_GET_ACTIVE_BOX (me, fd))) { 
          /* The box is no longer active. */ 
          continue; 
        } 
 
#ifdef MY_POLLNVAL 
        if(PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLNVAL)) 
        { 
          struct pollfd fds; 
          int ret; 
          /* NOTE: /dev/poll returns POLLNVAL for closed descriptors. */ 
          PDWERR("[%d]BACKEND[%d]: POLLNVAL on %d\n", THR_NO, me->id, fd); 
#ifdef PIKE_DEBUG 
          /* FIXME */ 
 
          fds.fd=fd; 
          fds.events=POLLIN; 
          fds.revents=0; 
          ret=poll(&fds, 1,1 ); 
          if(fds.revents & POLLNVAL) 
            Pike_fatal("Bad filedescriptor %d to poll().\n", fd); 
          /* Don't do anything further with this fd. */ 
          continue; 
#endif /* PIKE_DEBUG */ 
        } 
#endif /* MY_POLLNVAL */ 
 
        check_box (box, fd); 
 
#if 0 
    if(PDB_CHECK_EVENT(poll_fds[i], MY_POLLSIGNAL)) 
    { 
      fprintf(stderr, "SIGNAL EVENT RECEIVED!\n"); 
    } 
#endif 
 
        { 
#ifdef PIKE_DEBUG 
          int handled = 0; 
#endif /* PIKE_DEBUG */ 
          if (PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLERR)) { 
            /* Errors are signalled on the first available callback. */ 
            PDWERR("[%d]BACKEND[%d]: POLLERR on %d\n", THR_NO, me->id, fd); 
            box->revents |= PIKE_BIT_FD_ERROR; 
          } 
 
          if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLRDBAND)) { 
            PDWERR("[%d]BACKEND[%d]: POLLRDBAND on %d\n", THR_NO, me->id, fd); 
            box->revents |= PIKE_BIT_FD_READ_OOB; 
            DO_IF_DEBUG(handled = 1); 
          } 
          if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLIN)) { 
            PDWERR("[%d]BACKEND[%d]: POLLRDNORM|POLLIN on %d\n", 
                   THR_NO, me->id, fd); 
            box->revents |= PIKE_BIT_FD_READ; 
            DO_IF_DEBUG(handled = 1); 
          } 
          if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLWRBAND) || 
              PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLHUP)) { 
            PDWERR("[%d]BACKEND[%d]: POLLWRBAND|POLLHUP on %d\n", 
                   THR_NO, me->id, fd); 
            box->revents |= PIKE_BIT_FD_WRITE_OOB; 
            DO_IF_DEBUG(handled = 1); 
          } 
          if (PB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLOUT) || 
              PB_CHECK_ERROR_EVENT(pb->active_set.poll_fds[i], MY_POLLHUP)) { 
            PDWERR("[%d]BACKEND[%d]: POLLOUT|POLLHUP on %d\n", 
                   THR_NO, me->id, fd); 
            box->revents |= PIKE_BIT_FD_WRITE; 
            DO_IF_DEBUG(handled = 1); 
          } 
          if (PDB_CHECK_EVENT(pb->active_set.poll_fds[i], MY_POLLFSEVENT)) { 
            PDWERR("[%d]BACKEND[%d]: POLLFSEVENT on %d\n", 
                   THR_NO, me->id, fd); 
            box->rflags = PDB_GET_FLAGS(poll_fds[i]); 
            box->revents |= PIKE_BIT_FD_FS_EVENT; 
            DO_IF_DEBUG(handled = 1); 
          } 
#ifdef PIKE_DEBUG 
          if (!handled && PB_GET_EVENTS(pb->active_set.poll_fds[i])) { 
            fprintf(stderr, "[%d]BACKEND[%d]: fd %ld has revents 0x%08lx, " 
                    "but hasn't been handled.\n", THR_NO, me->id, 
                    (long)PB_GET_FD(pb->active_set.poll_fds[i]), 
                    (long)PB_GET_EVENTS(pb->active_set.poll_fds[i])); 
            /* pdb_describe_event(me, pb->active_set.poll_fds[i]); */ 
          } 
#endif /* PIKE_DEBUG */ 
        } 
        if (box->revents) { 
        next_fd: 
          /* Hook in the box on the fd_list. */ 
          if (!box->next) { 
            PDWERR("[%d]BACKEND[%d]: hooking in box for fd %d\n", 
                   THR_NO, me->id, fd); 
            box->next = fd_list.next; 
            fd_list.next = box; 
            if (box->ref_obj) add_ref(box->ref_obj); 
          } else { 
            PDWERR("[%d]BACKEND[%d]: fd %d already in list.\n", 
                   THR_NO, me->id, fd); 
          } 
        } 
      } 
 
      /* Common code for all variants. 
       * 
       * Call callbacks for the active events. 
       */ 
      if (backend_call_active_callbacks(&fd_list, me)) { 
        CALL_AND_UNSET_ONERROR(free_fd_list); 
        goto backend_round_done; 
      } 
 
      CALL_AND_UNSET_ONERROR(free_fd_list); 
 
      /* Must be up-to-date for backend_do_call_outs. */ 
      INVALIDATE_CURRENT_TIME(); 
    }else{ 
      switch(errno) 
      { 
#ifdef __NT__ 
      default: 
        Pike_fatal("Error in backend %d\n",errno); 
        break; 
#endif 
 
      case EINVAL: 
        Pike_fatal("Invalid timeout to select().\n"); 
        break; 
 
#ifdef WSAEINTR 
      case WSAEINTR: 
#endif 
      case EINTR:               /* ignore */ 
        break; 
 
#ifdef WSAEBADF 
      case WSAEBADF: 
#endif 
#ifdef ENOTSOCK 
      case ENOTSOCK: 
#endif 
#ifdef WSAENOTSOCK 
      case WSAENOTSOCK: 
#endif 
      case EBADF: 
        /* TODO: Fix poll version! */ 
        break; 
 
      } 
    } 
 
    { 
      int call_outs_called = 
        backend_do_call_outs(me); /* Will update current_time after calls. */ 
      if (call_outs_called) 
        done_something = 1; 
      if (call_outs_called < 0) 
        goto backend_round_done; 
    } 
 
    call_callback(&me->backend_callbacks, NULL); 
 
  backend_round_done: 
 
#ifdef PIKE_THREADS 
    me->done_counter += done_something; 
 
    co_broadcast(&me->backend_signal); 
#endif 
 
    CALL_AND_UNSET_ONERROR (uwp); 
 
  low_backend_round_done: 
    if (done_something <= 0) 
      timeout->tv_sec = -1; 
    else { 
      struct timeval now; 
      INACCURATE_GETTIMEOFDAY(&now); 
      timeout->tv_sec = now.tv_sec; 
      timeout->tv_usec = now.tv_usec; 
      my_subtract_timeval (timeout, &start_time); 
    } 
  } 
 
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time) 
   *!   Perform one pass through the backend. 
   *! 
   *!   Calls any outstanding call-outs and non-blocking I/O 
   *!   callbacks that are registered in this backend object.
   *! 
   *! @param sleep_time 
   *!   Wait at most @[sleep_time] seconds. The default when 
   *!   unspecified or the integer @expr{0@} is no time limit. 
   *! 
   *! @returns 
   *!   If the backend did call any callbacks or call outs then the 
   *!   time spent in the backend is returned as a float. Otherwise 
   *!   the integer @expr{0@} is returned. 
   *! 
   *! @seealso 
   *!   @[Pike.DefaultBackend], @[main()] 
   */ 
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout; /* Got correct gcc warning on timeout.tv_usec. */

    /* Convert the optional sleep_time argument into a timeval bound
     * for this backend round. */
    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec = 
        (long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6); 
    } 
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT && 
             sleep_time->u.integer) { 
      /* Only the integer 0 is accepted; any other int is a type error. */
      SIMPLE_ARG_TYPE_ERROR("`()", 1, "float|int(0..0)"); 
    } 
    else 
    { 
      /* Unspecified or 0: no time limit. */
      timeout.tv_sec = -1; 
      timeout.tv_usec = 0; 
    } 
 
    pb_low_backend_once(THIS, &timeout); 
 
    pop_n_elems (args); 
    /* A negative tv_sec on return indicates that no callbacks or call
     * outs were run; otherwise timeout holds the time spent, which is
     * returned as a float. */
    if (timeout.tv_sec < 0) 
      push_int (0); 
    else 
      push_float((FLOAT_TYPE) 
                 ((double)timeout.tv_sec + (double)timeout.tv_usec / 1e6)); 
  } 
 
  EXTRA 
  { 
    /* Compute the byte offset from this class' storage (inherit 0) to
     * the inherited Backend storage (inherit 1); INIT uses it to find
     * the Backend_struct. */
    pb_offset = Pike_compiler->new_program->inherits[1].storage_offset - 
      Pike_compiler->new_program->inherits[0].storage_offset; 
 
    /* Trace the poll event bit values (debug output). */
    PDWERR("MY_POLLIN:       0x%04x\n" 
           "MY_POLLOUT:      0x%04x\n" 
           "MY_POLLEXCEPT:   0x%04x\n" 
           "MY_POLLRDBAND:   0x%04x\n" 
           "MY_POLLWREXCEPT: 0x%04x\n" 
           "MY_POLLWRBAND:   0x%04x\n", 
           MY_POLLIN, MY_POLLOUT, 
           MY_POLLEXCEPT, MY_POLLRDBAND, 
           MY_POLLWREXCEPT, MY_POLLWRBAND); 
  } 
 
  INIT 
  { 
    /* Locate the storage of the inherited Backend (offset computed in
     * EXTRA) and register this class' handlers with it. */
    struct Backend_struct *me = 
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + pb_offset); 
 
    PDWERR("[%d]BACKEND[%d]: init generic\n", THR_NO, me->id); 
 
#ifdef PIKE_DEBUG 
    me->debug_handler = (debug_handler_fn *) pb_backend_do_debug; 
#endif 
    me->update_fd_set_handler = (update_fd_set_handler_fn *) pb_update_fd_set; 
    me->handler_data = THIS; 
 
    /* Start with empty poll fd sets. */
    THIS->set.poll_fds=0; 
    THIS->set.poll_fd_size=0; 
    THIS->set.num_in_poll=0; 
 
    THIS->active_set.poll_fds=0; 
    THIS->active_set.poll_fd_size=0; 
    THIS->active_set.num_in_poll=0; 
  } 
 
  EXIT
    gc_trivial;
  {
    /* Free the poll fd arrays allocated for this backend instance.
     * Removed an unused local (int e) that was never referenced. */
    struct Backend_struct *me = THIS->backend;

    PDWERR("[%d]BACKEND[%d]: exit generic backend\n", THR_NO, me->id);

    /* Reset the bookkeeping after freeing so a stale pointer can
     * never be freed twice. */
    if (THIS->set.poll_fds) {
      free(THIS->set.poll_fds);
      THIS->set.poll_fds = NULL;
      THIS->set.poll_fd_size = 0;
      THIS->set.num_in_poll = 0;
    }
    if (THIS->active_set.poll_fds) {
      free(THIS->active_set.poll_fds);
      THIS->active_set.poll_fds = NULL;
      THIS->active_set.poll_fd_size = 0;
      THIS->active_set.num_in_poll = 0;
    }
  }
} 
 
/*! @endclass 
 */ 
 
#else /* HAVE_POLL */ 
/*! @class SelectBackend 
 *! @inherit __Backend 
 *! 
 *! Backend based on the classic @tt{select(2)@} system call from BSD. 
 */ 
PIKECLASS SelectBackend 
{ 
  INHERIT Backend; 
 
  /* Helpers to find the above inherit. */ 
  static ptrdiff_t sb_offset = 0; 
  CVAR struct Backend_struct *backend; 
 
  /* 
   * POLL/SELECT fd sets 
   */ 
  CVAR struct sb_selectors set; 
  CVAR struct sb_active_selectors active_set; 
 
  DECLARE_STORAGE 
 
  /* 
   * FD set handling 
   */ 
 
  /* Bring the select() fd sets in sync with a change of the wanted
   * events for fd from old_events to new_events.
   *
   * The exception set is kept enabled whenever read or write events
   * are wanted, since POSIX reports errors through it, and exceptions
   * may in turn trigger read, read_oob and write callbacks. */
  static void sb_update_fd_set (struct Backend_struct *me,
                                struct SelectBackend_struct *sb, int fd,
                                int old_events, int new_events)
  {
    int delta = old_events ^ new_events;

    PDWERR("[%d]BACKEND[%d]: sb_update_fd_set(.., %d, %d, %d):\n",
           THR_NO, me->id, fd, old_events, new_events);


    if (!delta) return;

    if (delta & PIKE_BIT_FD_READ) {
      if (new_events & PIKE_BIT_FD_READ) {
        sb_MY_FD_SET(&sb->set, fd, MY_READSET);
        /* Got to enable the exception set to get errors (at least
         * according to POSIX). */
        sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
      } else {
        sb_MY_FD_CLR(&sb->set, fd, MY_READSET);
        /* Exceptions might cause calls to read, read_oob and write. */
        if (!(new_events & (PIKE_BIT_FD_READ_OOB | PIKE_BIT_FD_WRITE)))
          sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
      }
    }

    if (delta & PIKE_BIT_FD_READ_OOB) {
      if (new_events & PIKE_BIT_FD_READ_OOB)
        sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
      /* Exceptions might cause calls to read, read_oob and write. */
      else if (!(new_events & (PIKE_BIT_FD_READ | PIKE_BIT_FD_WRITE)))
        sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
    }

    if (delta & PIKE_BIT_FD_WRITE) {
      if (new_events & PIKE_BIT_FD_WRITE) {
        sb_MY_FD_SET(&sb->set, fd, MY_WRITESET);
        /* Got to enable the exception set to get errors (at least
         * according to POSIX). */
        sb_MY_FD_SET(&sb->set, fd, MY_EXCEPTSET);
      } else if (!(new_events & PIKE_BIT_FD_WRITE_OOB)) {
        sb_MY_FD_CLR(&sb->set, fd, MY_WRITESET);
        /* Exceptions might cause calls to read, read_oob and write. */
        if (!(new_events & (PIKE_BIT_FD_READ | PIKE_BIT_FD_READ_OOB)))
          sb_MY_FD_CLR(&sb->set, fd, MY_EXCEPTSET);
      }
    }

    if (delta & PIKE_BIT_FD_WRITE_OOB) {
      if (new_events & PIKE_BIT_FD_WRITE_OOB)
        sb_MY_FD_SET(&sb->set, fd, MY_WRITESET);
      else if (!(new_events & PIKE_BIT_FD_WRITE))
        sb_MY_FD_CLR(&sb->set, fd, MY_WRITESET);
    }

    if (new_events & ~old_events)
      /* New events were added. */
      backend_wake_up_backend (me);
  }
 
#ifdef PIKE_DEBUG 
 
  /* Debug-only consistency check of the select sets: every fd present
   * in any set must be mapped to this backend in fd_map and must still
   * refer to a live descriptor; Pike_fatal() on any inconsistency. */
  static void sb_backend_do_debug(struct Backend_struct *me,
                                  struct SelectBackend_struct *sb)
    {
      int e;
      PIKE_STAT_T tmp;

      /* FIXME: OOB? */
      for(e=0;e<=sb->set.max_fd;e++)
      {
        if(my_FD_ISSET(e, sb->set.sets + MY_READSET)
           || my_FD_ISSET(e, sb->set.sets + MY_WRITESET)
           || my_FD_ISSET(e, sb->set.sets + MY_EXCEPTSET)
          )
        {
          int ret;

          if (e >= fd_map_size || fd_map[e] != me)
            Pike_fatal("Unmapped fd %d in select set.\n", e);

          /* Retry fstat on EINTR so a signal does not cause a
           * spurious fatal below. */
          do {
            ret = fd_fstat(e, &tmp);
            /* FIXME: Perhaps do check_threads_etc() here? */
          }while(ret < 0 && errno == EINTR);

          if(ret<0)
          {
            switch(errno)
            {
              case EBADF:
                Pike_fatal("Backend filedescriptor %d is bad.\n",e);
                break;
              case ENOENT:
                Pike_fatal("Backend filedescriptor %d is not.\n",e);
                break;
            }
          }
        }
      }
    }
 
#endif  /* PIKE_DEBUG */ 
 
  /* Run one round of the select() based backend.
   *
   * A negative tv_sec in timeout turns it off. If it ran until the
   * timeout without calling any callbacks or call outs (except those
   * on backend_callbacks) then tv_sec will be set to -1. Otherwise it
   * will be set to the time spent. */
  static void sb_low_backend_once(struct SelectBackend_struct *sb,
                                  struct timeval *timeout)
  {
    ONERROR uwp;
    int i, done_something = 0;
    struct timeval start_time = *timeout;
    struct Backend_struct *me = sb->backend;
#ifdef DECLARE_POLL_EXTRAS
    /* Declare any extra variables needed by MY_POLL(). */
    DECLARE_POLL_EXTRAS;
#endif /* DECLARE_POLL_EXTRAS */

    if ((done_something = low_backend_once_setup(sb->backend, &start_time))) {
      goto low_backend_round_done;
    }
    /* NB: Use me (== sb->backend) here rather than THIS; a static
     *     helper should not rely on the current frame belonging to
     *     this class. */
    SET_ONERROR(uwp, low_backend_cleanup, me);

    if (TYPEOF(me->before_callback) != T_INT)
      call_backend_monitor_cb (me, &me->before_callback);

    {
      struct timeval poll_timeout;
      struct timeval *next_timeout = &sb->backend->next_timeout;

      me->may_need_wakeup = 1;

      poll_timeout = *next_timeout;

      /* Work on a copy of the wanted sets, since select() mutates
       * them in place. */
      sb_copy_selectors(& sb->active_set, &sb->set);

      PDWERR("[%d]BACKEND[%d]: Doing poll on fds:\n", THR_NO, me->id);

      check_threads_etc();
      THREADS_ALLOW();

      /* Note: The arguments to MY_POLL may be evaluated multiple times. */
      i = SB_SELECT(sb->active_set, poll_timeout);

      PDWERR(" => %d\n", i);

      THREADS_DISALLOW();
      check_threads_etc();
      me->may_need_wakeup = 0;
      INVALIDATE_CURRENT_TIME();
    }

    if (TYPEOF(me->after_callback) != T_INT)
      call_backend_monitor_cb (me, &me->after_callback);

    if (!i) {
      /* Timeout */
    } else if (i>0) {
      /* i fds have events. Collect the affected boxes on a circular
       * list headed by fd_list, then dispatch their callbacks. */
      struct fd_callback_box fd_list = {
        me, NULL, &fd_list,
        -1, 0, 0,
        0, 0, NULL
      };
      struct fd_callback_box *box;
      ONERROR free_fd_list;

      SET_ONERROR(free_fd_list, do_free_fd_list, &fd_list);

      done_something = 1;

      for(i=0; i <= sb->active_set.max_fd; i++)
      {
        box = SAFE_GET_ACTIVE_BOX(me, i);
        if (!box) continue;
        check_box(box, i);

        box->revents = 0;
        box->flags = 0;

        if(fd_FD_ISSET(i, sb->active_set.asets + MY_EXCEPTSET)) {
          /* Check for errors. GNU libc says this isn't set on error, but
           * POSIX does. FIXME: What bits will be set for errors on GNU
           * systems, then? Should we always check for that? */
          int err = 0;
          ACCEPT_SIZE_T len = sizeof (err);
          PDWERR("[%d]BACKEND[%d]: exception on %d\n", THR_NO, me->id, i);
          if (!getsockopt (i, SOL_SOCKET, SO_ERROR, (void *)&err, &len) &&
              err) {
            PDWERR("[%d]BACKEND[%d]: error on %d, error=%d\n",
                   THR_NO, me->id, i, err);
            box->revents |= PIKE_BIT_FD_ERROR;
          } else {
            box->revents |= PIKE_BIT_FD_READ_OOB;
          }
        }

        if(fd_FD_ISSET(i, sb->active_set.asets + MY_READSET)) {
          PDWERR("[%d]BACKEND[%d]: read on %d\n", THR_NO, me->id, i);
          box->revents |= PIKE_BIT_FD_READ;
        }

        if(fd_FD_ISSET(i, sb->active_set.asets + MY_WRITESET)) {
          PDWERR("[%d]BACKEND[%d]: write on %d\n", THR_NO, me->id, i);
          /* OOB can by BSD definition always be written, so if we can
           * write normal data it's reasonable to assume OOB can be
           * written too without too much risk of being thrown away. */
          box->revents |= PIKE_BIT_FD_WRITE | PIKE_BIT_FD_WRITE_OOB;
        }

        if (box->revents) {
          /* Hook in the box on the fd_list. */
          if (!box->next) {
            box->next = fd_list.next;
            fd_list.next = box;
            if (box->ref_obj) add_ref(box->ref_obj);
          }
        }
      }

      /* Common code for all variants.
       *
       * Call callbacks for the active events.
       */
      if (backend_call_active_callbacks(&fd_list, me)) {
        CALL_AND_UNSET_ONERROR(free_fd_list);
        goto backend_round_done;
      }

      CALL_AND_UNSET_ONERROR(free_fd_list);

      /* Must be up-to-date for backend_do_call_outs. */
      INVALIDATE_CURRENT_TIME();
    }else{
      switch(errno)
      {
#ifdef __NT__
      default:
        Pike_fatal("Error in backend %d\n",errno);
        break;
#endif

      case EINVAL:
        Pike_fatal("Invalid timeout to select().\n");
        break;

#ifdef WSAEINTR
      case WSAEINTR:
#endif
      case EINTR:		/* ignore */
        break;

#ifdef WSAEBADF
      case WSAEBADF:
#endif
#ifdef ENOTSOCK
      case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
      case WSAENOTSOCK:
#endif
      case EBADF:
        /* TODO: Fix poll version! */

        /* Some fd in the set is bad. First retry the whole set with a
         * zero timeout, and if that fails too, probe each fd one at a
         * time to identify the culprit. */
        sb_copy_selectors(&sb->active_set, &sb->set);

        timeout->tv_usec=0;
        timeout->tv_sec=0;
        if(SB_SELECT(sb->active_set, *timeout) < 0)
        {
          switch(errno)
          {
#ifdef WSAEBADF
          case WSAEBADF:
#endif
#ifdef ENOTSOCK
          case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
          case WSAENOTSOCK:
#endif
          case EBADF:
            {
              FOR_EACH_ACTIVE_FD_BOX (me, box) {
                fd_FD_ZERO(sb->active_set.asets + MY_READSET);
                fd_FD_ZERO(sb->active_set.asets + MY_WRITESET);
                fd_FD_ZERO(sb->active_set.asets + MY_EXCEPTSET);

                if(my_FD_ISSET(box->fd, sb->set.sets + MY_READSET))
                  fd_FD_SET(box->fd, sb->active_set.asets + MY_READSET);
                if(my_FD_ISSET(box->fd, sb->set.sets + MY_WRITESET))
                  fd_FD_SET(box->fd, sb->active_set.asets + MY_WRITESET);
                if(my_FD_ISSET(box->fd, sb->set.sets + MY_EXCEPTSET))
                  fd_FD_SET(box->fd, sb->active_set.asets + MY_EXCEPTSET);

                timeout->tv_usec=0;
                timeout->tv_sec=0;

                if(SB_SELECT(sb->active_set, *timeout) < 0)
                {
                  switch(errno)
                  {
#ifdef __NT__
                  default:
#endif
                  case EBADF:
#ifdef WSAEBADF
                  case WSAEBADF:
#endif
#ifdef ENOTSOCK
                  case ENOTSOCK:
#endif
#ifdef WSAENOTSOCK
                  case WSAENOTSOCK:
#endif

#ifdef DEBUG_MALLOC
                    debug_malloc_dump_fd(box->fd);
#endif
                    Pike_fatal("Filedescriptor %d (%s) caused fatal error %d "
                               "in backend.\n",
                               box->fd, fd_info(box->fd), errno);

                  case EINTR:
                    break;
                  }
                }
              }
            }
          }
#ifdef _REENTRANT
          /* FIXME: Extra stderr messages should not be allowed.../Hubbe */
          write_to_stderr("Bad filedescriptor to select().\n"
                          "fd closed in another thread?\n", 62);
#else /* !_REENTRANT */
          Pike_fatal("Bad filedescriptor to select().\n");
#endif /* _REENTRANT */
        }
        break;

      }
    }

    {
      int call_outs_called =
        backend_do_call_outs(me); /* Will update current_time after calls. */
      if (call_outs_called)
        done_something = 1;
      if (call_outs_called < 0)
        goto backend_round_done;
    }

    call_callback(&me->backend_callbacks, NULL);

  backend_round_done:

#ifdef PIKE_THREADS
    me->done_counter += done_something;

    co_broadcast(&me->backend_signal);
#endif

    CALL_AND_UNSET_ONERROR (uwp);

  low_backend_round_done:

    if (done_something <= 0)
      timeout->tv_sec = -1;
    else {
      struct timeval now;
      INACCURATE_GETTIMEOFDAY(&now);
      timeout->tv_sec = now.tv_sec;
      timeout->tv_usec = now.tv_usec;
      my_subtract_timeval (timeout, &start_time);
    }
  }
 
  /*! @decl float|int(0..0) `()(void|float|int(0..0) sleep_time) 
   *!   Perform one pass through the backend. 
   *! 
   *!   Calls any outstanding call-outs and non-blocking I/O 
   *!   callbacks that are registered in this backend object.
   *! 
   *! @param sleep_time 
   *!   Wait at most @[sleep_time] seconds. The default when 
   *!   unspecified or the integer @expr{0@} is no time limit. 
   *! 
   *! @returns 
   *!   If the backend did call any callbacks or call outs then the 
   *!   time spent in the backend is returned as a float. Otherwise 
   *!   the integer @expr{0@} is returned. 
   *! 
   *! @seealso 
   *!   @[Pike.DefaultBackend], @[main()] 
   */ 
  PIKEFUN float|int(0..0) `()(void|float|int(0..0) sleep_time)
  {
    struct timeval timeout;

    /* Convert the optional sleep_time argument into a timeval bound
     * for this backend round. */
    if (sleep_time && TYPEOF(*sleep_time) == PIKE_T_FLOAT) {
      timeout.tv_sec = (long) floor (sleep_time->u.float_number);
      timeout.tv_usec =
        (long) ((sleep_time->u.float_number - timeout.tv_sec) * 1e6);
    }
    else if (sleep_time && TYPEOF(*sleep_time) == T_INT &&
             sleep_time->u.integer) {
      /* Only the integer 0 is accepted; any other int is a type error. */
      SIMPLE_ARG_TYPE_ERROR("`()", 1, "float|int(0..0)");
    }
    else {
      /* Unspecified or 0: no time limit.
       *
       * Initialize tv_usec too: sb_low_backend_once() copies *timeout
       * into its start_time, whose tv_usec is later read by
       * my_subtract_timeval(). The gcc warning about tv_usec was
       * legitimate. (The poll backend variant already does this.) */
      timeout.tv_sec = -1;
      timeout.tv_usec = 0;
    }

    sb_low_backend_once(THIS, &timeout);

    pop_n_elems (args);
    /* A negative tv_sec on return indicates that no callbacks or call
     * outs were run; otherwise timeout holds the time spent, which is
     * returned as a float. */
    if (timeout.tv_sec < 0)
      push_int (0);
    else
      push_float((FLOAT_TYPE)
                 ((double)timeout.tv_sec + (double)timeout.tv_usec / 1e6));
  }
 
  EXTRA 
  { 
    /* Compute the byte offset from this class' storage (inherit 0) to
     * the inherited Backend storage (inherit 1); INIT uses it to find
     * the Backend_struct. */
    sb_offset = Pike_compiler->new_program->inherits[1].storage_offset - 
      Pike_compiler->new_program->inherits[0].storage_offset; 
  } 
 
  INIT 
  { 
    /* Locate the storage of the inherited Backend (offset computed in
     * EXTRA) and register this class' handlers with it. */
    struct Backend_struct *me = 
      THIS->backend = (struct Backend_struct *)(((char *)THIS) + sb_offset); 
 
    PDWERR("[%d]BACKEND[%d]: init generic\n", THR_NO, me->id); 
 
#ifdef PIKE_DEBUG 
    me->debug_handler = (debug_handler_fn *) sb_backend_do_debug; 
#endif 
    me->update_fd_set_handler = (update_fd_set_handler_fn *) sb_update_fd_set; 
    me->handler_data = THIS; 
 
    /* Start with empty select sets. */
    THIS->set.max_fd=0; 
    my_FD_ZERO(THIS->set.sets + MY_READSET); 
    my_FD_ZERO(THIS->set.sets + MY_WRITESET); 
    my_FD_ZERO(THIS->set.sets + MY_EXCEPTSET); 
    /* FIXME: Should there be something else here? */ 
    /* me->set.num_fds=0; */ 
  } 
 
#ifdef POLL_DEBUG 
  /* Only needed for the debug trace; the select based backend has no
   * dynamically allocated state of its own to free. */
  EXIT 
    gc_trivial; 
  { 
    struct Backend_struct *me = THIS->backend; 
    PDWERR("[%d]BACKEND[%d]: exit generic backend\n", THR_NO, me->id); 
  } 
#endif /* POLL_DEBUG */ 
} 
 
/*! @endclass 
 */ 
#endif 
 
/*! @module DefaultBackend 
 *!   This is the @[Backend] object that files and call_outs are
 *!   handled by, by default.
 *! 
 *!   This is also the @[Backend] object that will be used if @[main()] 
 *!   returns @expr{-1@}. 
 *! 
 *! @seealso 
 *!   @[Backend], @[Stdio.File()->set_nonblocking()], @[call_out()] 
 */ 
 
/*! @endmodule 
 */ 
 
/*! @endmodule 
 */ 
 
/*! @decl mixed call_out(function f, float|int delay, mixed ... args) 
 *! @decl void _do_call_outs() 
 *! @decl int find_call_out(function f) 
 *! @decl int find_call_out(mixed id) 
 *! @decl int remove_call_out(function f) 
 *! @decl int remove_call_out(mixed id)
 *! @decl array(array) call_out_info() 
 *!   These are aliases for the corresponding functions in 
 *!   @[Pike.DefaultBackend]. 
 *! 
 *! @seealso 
 *!   @[Pike.Backend()->call_out()], @[Pike.Backend()->_do_call_outs()], 
 *!   @[Pike.Backend()->find_call_out()], @[Pike.Backend()->remove_call_out()], 
 *!   @[Pike.Backend()->call_out_info()] 
 */ 
 
/* This doesn't need to be here */ 
/** 
 * Fail-safe/paranoid function to write a block of data to stderr (aka fd 2). 
 */ 
/**
 * Fail-safe/paranoid function to write a block of data to stderr (aka fd 2).
 *
 * Retries on EINTR, and temporarily switches fd 2 to blocking mode on
 * EWOULDBLOCK (restoring non-blocking mode afterwards). Any other
 * write error aborts the loop silently. Always returns 1.
 */
PMOD_EXPORT int write_to_stderr(char *a, size_t len)
{
#ifdef __NT__
  size_t i;
  for (i = 0; i < len; i++)
    putc(a[i], stderr);
#else
  int made_blocking = 0;
  size_t written = 0;

  if (!len) return 1;

  while (written < len)
  {
    int n = write(2, a + written, len - written);
    if (n >= 0) {
      written += n;
      continue;
    }
    switch (errno)
    {
#ifdef EWOULDBLOCK
      case EWOULDBLOCK:
        /* fd 2 is non-blocking; make it blocking for the duration. */
        made_blocking = 1;
        set_nonblocking(2, 0);
        continue;
#endif

      case EINTR:
        check_threads_etc();
        continue;
    }
    /* Unrecoverable error: give up (best effort). */
    break;
  }

  if (made_blocking)
    set_nonblocking(2, 1);

#endif
  return 1;
}
 
/** 
 * Get the backend (if any) that an fd is associated with. 
 */ 
PMOD_EXPORT struct object *get_backend_obj_for_fd (int fd)
{
  /* NULL when the fd is not associated with any backend. */
  struct Backend_struct *be = really_get_backend_for_fd (fd);
  return be ? be->backend_obj : NULL;
}
 
/** 
 * Associate an fd with a backend. 
 */ 
PMOD_EXPORT void set_backend_for_fd (int fd, struct Backend_struct *new)
{
  struct Backend_struct *old = get_backend_for_fd (fd);

  PDWERR("Changing backend from %d to %d for fd %d\n",
         old ? old->id : -1, new ? new->id : -1, fd);

  if (old) {
    struct fd_callback_box *box;

    /* Already associated with the requested backend: nothing to do. */
    if (old == new) return;

    /* Move or unhook any active callback box on the old backend. */
    box = SAFE_GET_ACTIVE_BOX (old, fd);
    if (box) {
      if (!new) {
        /* Compat boxes are owned by us and must be freed on unhook. */
        int free_it = box->callback == compat_box_dispatcher;
        unhook_fd_callback_box (box);
        if (free_it)
          really_free_compat_cb_box ((struct compat_cb_box *) box);
      }
      else
        change_backend_for_box (box, new);
    }
  }

  low_set_backend_for_fd (fd, new);
}
 
/* Compat stuff for old backend interface. */ 
 
/* Wrapper box used by the old backend interface: extends
 * fd_callback_box with the old-style per-event callbacks and their
 * opaque user data, dispatched via compat_box_dispatcher(). */
struct compat_cb_box
{
  struct fd_callback_box box;   /* Must be first. */
  /* Old-style callbacks, one per event type. */
  file_callback read, write, read_oob, write_oob, fs_event;
  /* Opaque user data passed to the corresponding callback above. */
  void *read_data, *write_data, *read_oob_data, *write_oob_data, *fs_event_data;
  int flags; /* fs event flags */
};
 
/* Describe a compat_cb_box in dmalloc leak/debug reports. */
#undef DMALLOC_DESCRIBE_BLOCK 
#define DMALLOC_DESCRIBE_BLOCK(X) do {                                  \
    fprintf (stderr, "  backend: %p, fd: %d, events: 0x%x\n",           \
             X->box.backend, X->box.fd, X->box.events);                 \
  } while (0) 
 
/* Block allocator holding all struct compat_cb_box instances. */
static struct block_allocator compat_cb_allocator = 
  BA_INIT_PAGES(sizeof(struct compat_cb_box), 1); 
 
/* Allocate a compat callback box (uninitialized). */
static struct compat_cb_box * alloc_compat_cb_box(void) { 
    return ba_alloc(&compat_cb_allocator); 
} 
 
/* Return a compat callback box to the allocator. */
static void really_free_compat_cb_box(struct compat_cb_box * b) { 
    ba_free(&compat_cb_allocator, b); 
} 
 
/* Report allocator memory usage via ba_count_all(). */
void count_memory_in_compat_cb_boxs(size_t * n, size_t * s) { 
    ba_count_all(&compat_cb_allocator, n, s); 
} 
 
/* Tear down the allocator, releasing all box storage. */
void free_all_compat_cb_box_blocks(void) { 
    ba_destroy(&compat_cb_allocator); 
} 
 
/* Adapter for the old backend interface: translates a new-style
 * fd_callback_box event into a call to the corresponding old-style
 * file_callback stored in the enclosing compat_cb_box, returning
 * whatever that callback returns. */
static int compat_box_dispatcher (struct fd_callback_box *box, int event)
{
  /* Safe downcast: box is the first member of compat_cb_box. */
  struct compat_cb_box *cbox = (struct compat_cb_box *) box;
  switch (event) {
    case PIKE_FD_READ:
      PDWERR("[%d]BACKEND[%d]: compat_box_dispatcher for "
             "PIKE_FD_READ to %p %p\n", THR_NO,
             cbox->box.backend->id, cbox->read, cbox->read_data);
      return cbox->read (cbox->box.fd, cbox->read_data);
    case PIKE_FD_WRITE:
      PDWERR("[%d]BACKEND[%d]: compat_box_dispatcher for "
             "PIKE_FD_WRITE to %p %p\n", THR_NO,
             cbox->box.backend->id, cbox->write, cbox->write_data);
      return cbox->write (cbox->box.fd, cbox->write_data);
    case PIKE_FD_READ_OOB:
      PDWERR("[%d]BACKEND[%d]: compat_box_dispatcher for "
             "PIKE_FD_READ_OOB to %p %p\n", THR_NO,
             cbox->box.backend->id, cbox->read_oob,
             cbox->read_oob_data);
      return cbox->read_oob (cbox->box.fd, cbox->read_oob_data);
    case PIKE_FD_WRITE_OOB:
      PDWERR("[%d]BACKEND[%d]: compat_box_dispatcher for "
             "PIKE_FD_WRITE_OOB to %p %p\n", THR_NO,
             cbox->box.backend->id, cbox->write_oob,
             cbox->write_oob_data);
      return cbox->write_oob (cbox->box.fd, cbox->write_oob_data);
    case PIKE_FD_FS_EVENT:
      PDWERR("[%d]BACKEND[%d]: compat_box_dispatcher for "
             "PIKE_FD_FS_EVENT to %p %p\n", THR_NO,
             cbox->box.backend->id, cbox->fs_event,
             cbox->fs_event_data);
      return cbox->fs_event (cbox->box.fd, cbox->fs_event_data);
#ifdef PIKE_DEBUG
    default:
      Pike_fatal ("Unexpected event type %d.\n", event);
#endif
  }
  UNREACHABLE(return 0);
}
 
/* WRAP(CB, EVENT_BIT) expands to the three public functions making up the
 * old-style callback interface for one event type:
 *
 *   void set_<CB>_callback (int fd, file_callback cb, void *data)
 *   file_callback query_<CB>_callback (int fd)
 *   void *query_<CB>_callback_data (int fd)
 *
 * set_<CB>_callback installs a legacy callback on fd, allocating a
 * compat_cb_box dispatched via compat_box_dispatcher on first use.
 * Passing cb == NULL clears the event bit and, once no events remain
 * hooked on the box, unhooks and frees it.  The query functions return
 * NULL when fd has no backend, no active box, or the event bit is off.
 *
 * In debug builds, mixing this interface with the new fd_callback_box
 * interface on the same fd is a fatal error.
 */
#define WRAP(CB, EVENT_BIT)                                             \ 
  void PIKE_CONCAT3(set_, CB, _callback)                                \ 
       (int fd, file_callback cb, void *data)                           \ 
  {                                                                     \ 
    struct Backend_struct *b = really_get_backend_for_fd (fd);          \ 
    struct fd_callback_box *box = SAFE_GET_ACTIVE_BOX (b, fd);          \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    PDWERR("[%d]BACKEND[%d]: set_" #CB "_callback (%d, %p, %p)\n",      \ 
           THR_NO, b->id, fd, cb, data);                                \ 
                                                                        \ 
    if (box) {                                                          \ 
      check_box (box, fd);                                              \ 
      DO_IF_DEBUG (                                                     \ 
        if (box->callback != compat_box_dispatcher)                     \ 
          Pike_fatal ("Mixing old and new style "                       \ 
                      "backend interfaces for fd %d.\n", fd);           \ 
      );                                                                \ 
      cbox = (struct compat_cb_box *) box;                              \ 
    }                                                                   \ 
    else {                                                              \ 
      if (!cb) return;                                                  \ 
      cbox = alloc_compat_cb_box();                                     \ 
      INIT_FD_CALLBACK_BOX (&cbox->box, b, NULL,                        \ 
                            fd, 0, compat_box_dispatcher, 0);           \ 
    }                                                                   \ 
                                                                        \ 
    cbox->CB = cb;                                                      \ 
    cbox->PIKE_CONCAT (CB, _data) = data;                               \ 
                                                                        \ 
    if (cb)                                                             \ 
      set_fd_callback_events (&cbox->box, cbox->box.events | EVENT_BIT, \ 
                              cbox->flags);                             \ 
    else {                                                              \ 
      set_fd_callback_events (&cbox->box, cbox->box.events & ~EVENT_BIT, \ 
                              cbox->flags);                             \ 
      if (!cbox->box.events) {                                          \ 
        unhook_fd_callback_box (&cbox->box);                            \ 
        really_free_compat_cb_box (cbox);                               \ 
      }                                                                 \ 
    }                                                                   \ 
  }                                                                     \ 
                                                                        \ 
  file_callback PIKE_CONCAT3(query_, CB, _callback) (int fd)            \ 
  {                                                                     \ 
    struct Backend_struct *b=get_backend_for_fd (fd);                   \ 
    struct fd_callback_box *box;                                        \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    if (!b) return NULL;                                                \ 
    if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;              \ 
    check_box (box, fd);                                                \ 
    DO_IF_DEBUG (                                                       \ 
      if (box->callback != compat_box_dispatcher)                       \ 
        Pike_fatal ("Mixing old and new style "                         \ 
                    "backend interfaces for fd %d.\n", fd);             \ 
    );                                                                  \ 
                                                                        \ 
    cbox = (struct compat_cb_box *) box;                                \ 
    if (!(cbox->box.events & EVENT_BIT)) return NULL;                   \ 
    return cbox->CB;                                                    \ 
  }                                                                     \ 
                                                                        \ 
  void *PIKE_CONCAT3(query_, CB, _callback_data) (int fd)               \ 
  {                                                                     \ 
    struct Backend_struct *b=get_backend_for_fd (fd);                   \ 
    struct fd_callback_box *box;                                        \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    if (!b) return NULL;                                                \ 
    if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;              \ 
    check_box (box, fd);                                                \ 
    DO_IF_DEBUG (                                                       \ 
      if (box->callback != compat_box_dispatcher)                       \ 
        Pike_fatal ("Mixing old and new style "                         \ 
                    "backend interfaces for fd %d.\n", fd);             \ 
    );                                                                  \ 
                                                                        \ 
    cbox = (struct compat_cb_box *) box;                                \ 
    if (!(cbox->box.events & EVENT_BIT)) return NULL;                   \ 
    return cbox->PIKE_CONCAT (CB, _data);                               \ 
  } 
 
/* WRAP2(CB, EVENT_BIT) is identical to WRAP() except that the generated
 * set_<CB>_callback takes an extra trailing "flags" argument, which is
 * forwarded to INIT_FD_CALLBACK_BOX only when a new box is allocated;
 * for an fd that already has a box, the box's existing flags are kept
 * (only cbox->flags is passed to set_fd_callback_events). */
#define WRAP2(CB, EVENT_BIT)                                            \ 
  void PIKE_CONCAT3(set_, CB, _callback)                                \ 
       (int fd, file_callback cb, void *data, int flags)                \ 
  {                                                                     \ 
    struct Backend_struct *b = really_get_backend_for_fd (fd);          \ 
    struct fd_callback_box *box = SAFE_GET_ACTIVE_BOX (b, fd);          \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    PDWERR("[%d]BACKEND[%d]: set_" #CB "_callback (%d, %p, %p)\n",      \ 
           THR_NO, b->id, fd, cb, data);                                \ 
                                                                        \ 
    if (box) {                                                          \ 
      check_box (box, fd);                                              \ 
      DO_IF_DEBUG (                                                     \ 
        if (box->callback != compat_box_dispatcher)                     \ 
          Pike_fatal ("Mixing old and new style "                       \ 
                      "backend interfaces for fd %d.\n", fd);           \ 
      );                                                                \ 
      cbox = (struct compat_cb_box *) box;                              \ 
    }                                                                   \ 
    else {                                                              \ 
      if (!cb) return;                                                  \ 
      cbox = alloc_compat_cb_box();                                     \ 
      INIT_FD_CALLBACK_BOX (&cbox->box, b, NULL,                        \ 
                            fd, 0, compat_box_dispatcher, flags);       \ 
    }                                                                   \ 
                                                                        \ 
    cbox->CB = cb;                                                      \ 
    cbox->PIKE_CONCAT (CB, _data) = data;                               \ 
                                                                        \ 
    if (cb)                                                             \ 
      set_fd_callback_events (&cbox->box, cbox->box.events | EVENT_BIT, \ 
                              cbox->flags);                             \ 
    else {                                                              \ 
      set_fd_callback_events (&cbox->box, cbox->box.events & ~EVENT_BIT, \ 
                              cbox->flags);                             \ 
      if (!cbox->box.events) {                                          \ 
        unhook_fd_callback_box (&cbox->box);                            \ 
        really_free_compat_cb_box (cbox);                               \ 
      }                                                                 \ 
    }                                                                   \ 
  }                                                                     \ 
                                                                        \ 
  file_callback PIKE_CONCAT3(query_, CB, _callback) (int fd)            \ 
  {                                                                     \ 
    struct Backend_struct *b=get_backend_for_fd (fd);                   \ 
    struct fd_callback_box *box;                                        \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    if (!b) return NULL;                                                \ 
    if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;              \ 
    check_box (box, fd);                                                \ 
    DO_IF_DEBUG (                                                       \ 
      if (box->callback != compat_box_dispatcher)                       \ 
        Pike_fatal ("Mixing old and new style "                         \ 
                    "backend interfaces for fd %d.\n", fd);             \ 
    );                                                                  \ 
                                                                        \ 
    cbox = (struct compat_cb_box *) box;                                \ 
    if (!(cbox->box.events & EVENT_BIT)) return NULL;                   \ 
    return cbox->CB;                                                    \ 
  }                                                                     \ 
                                                                        \ 
  void *PIKE_CONCAT3(query_, CB, _callback_data) (int fd)               \ 
  {                                                                     \ 
    struct Backend_struct *b=get_backend_for_fd (fd);                   \ 
    struct fd_callback_box *box;                                        \ 
    struct compat_cb_box *cbox;                                         \ 
                                                                        \ 
    if (!b) return NULL;                                                \ 
    if (!(box = SAFE_GET_ACTIVE_BOX (b, fd))) return NULL;              \ 
    check_box (box, fd);                                                \ 
    DO_IF_DEBUG (                                                       \ 
      if (box->callback != compat_box_dispatcher)                       \ 
        Pike_fatal ("Mixing old and new style "                         \ 
                    "backend interfaces for fd %d.\n", fd);             \ 
    );                                                                  \ 
                                                                        \ 
    cbox = (struct compat_cb_box *) box;                                \ 
    if (!(cbox->box.events & EVENT_BIT)) return NULL;                   \ 
    return cbox->PIKE_CONCAT (CB, _data);                               \ 
  } 
 
/* Instantiate the legacy callback API for each supported event type.
 * fs_event takes an extra flags argument, hence WRAP2. */
WRAP(read, PIKE_BIT_FD_READ) 
WRAP(write, PIKE_BIT_FD_WRITE) 
WRAP(read_oob, PIKE_BIT_FD_READ_OOB) 
WRAP(write_oob, PIKE_BIT_FD_WRITE_OOB) 
WRAP2(fs_event, PIKE_BIT_FD_FS_EVENT) 
 
/**
 * Register a callback to be invoked by the default backend.
 *
 * Thin convenience wrapper around backend_debug_add_backend_callback();
 * ownership/free semantics follow that function (free_func is called
 * when the callback is removed).
 */
PMOD_EXPORT struct callback *debug_add_backend_callback(callback_func call,
                                                        void *arg,
                                                        callback_func free_func)
{
  struct callback *cb =
    backend_debug_add_backend_callback(default_backend, call,
                                       arg, free_func);
  return cb;
}
 
/* Wake the default backend out of its wait, if one exists.  No-op when
 * no default backend has been created. */
void wake_up_backend(void)
{
  if (!default_backend)
    return;
  backend_wake_up_backend(default_backend);
}
 
/* Run pending call outs on the default backend.  The cached current time
 * is invalidated first so due times are checked against a fresh clock.
 * No-op when no default backend exists. */
void do_call_outs(void)
{
  if (!default_backend)
    return;
  INVALIDATE_CURRENT_TIME();
  backend_do_call_outs(default_backend);
}
 
#ifdef PIKE_DEBUG 
/* Monotonically increasing id for each debug-check pass. */
long do_debug_cycle=1;
/* Nonzero while a do_debug() pass is in progress; doubles as the id of
 * the running pass and as a reentrancy guard. */
long current_do_debug_cycle=0;

/* Run consistency checks over the interpreter's global data structures.
 * Reentrant calls return immediately (guarded by current_do_debug_cycle).
 * With d_flag > 2 the heavy structural checks run; d_flag > 3 also
 * triggers a garbage collection.  Debug builds only (PIKE_DEBUG). */
void do_debug(void)
{
  extern void check_all_arrays(void);
  extern void check_all_mappings(void);
  extern void check_all_programs(void);
  extern void check_all_objects(void);
  extern void verify_shared_strings_tables(void);
  extern void slow_check_stack(void);

  /* Reentrancy guard: claim a fresh cycle id before doing any checks. */
  if(current_do_debug_cycle) return;
  current_do_debug_cycle=++do_debug_cycle;

  if (d_flag > 2) {
    verify_shared_strings_tables();
    slow_check_stack();
    check_all_arrays();
    check_all_mappings();
    check_all_programs();
    check_all_objects();
  }

  /* Let other modules run their registered debug checks. */
  call_callback(& do_debug_callbacks, 0);

  if(default_backend)
    backend_do_debug(default_backend);

  if(d_flag>3) do_gc(NULL, 1);

  current_do_debug_cycle=0;
}
 
/**
 * Assert that fd is non-negative and not already registered with any
 * backend (via the global fd -> backend map).  Fatal on violation.
 * Debug builds only (PIKE_DEBUG).
 */
PMOD_EXPORT void debug_check_fd_not_in_use (int fd)
{
  if (fd < 0) Pike_fatal ("Invalid fd: %d\n", fd);
  /* fds beyond fd_map_size have never been mapped, so they are free. */
  if (fd < fd_map_size && fd_map[fd])
    Pike_fatal ("fd %d already in use by backend %d.\n", fd, fd_map[fd]->id);
}
 
#endif /* PIKE_DEBUG */ 
 
/* Handle returned by add_memory_usage_callback() in init_backend();
 * freed elsewhere at shutdown (see the note in exit_backend()). */
static struct callback *mem_callback;
 
/* Create the default backend at interpreter startup and publish it as the
 * __backend constant.  Picks the best available implementation: a poll
 * device backend where supported (falling back via SETJMP if its creation
 * throws), otherwise poll(2), otherwise select(2). */
void init_backend(void)
{
  PDWERR("BACKEND: Init compat callback boxes...\n");
  PDWERR("BACKEND: INIT...\n");
  INIT;
  PDWERR("BACKEND: Creating default backend...\n");
  {
    /* Select something suitable. */
#ifdef OPEN_POLL_DEVICE
    /* Note that creation of a poll device backend may fail. */
    JMP_BUF recovery;
    if (SETJMP(recovery)) {
      /* Poll device creation threw -- fall back. */
#ifdef HAVE_POLL
      default_backend_obj = clone_object(PollBackend_program, 0);
#else
      default_backend_obj = clone_object(SelectBackend_program, 0);
#endif
    } else {
      default_backend_obj = clone_object(PollDeviceBackend_program, 0);
    }
    UNSETJMP(recovery);
#elif defined(HAVE_POLL)
    default_backend_obj = clone_object(PollBackend_program, 0);
#else
    default_backend_obj = clone_object(SelectBackend_program, 0);
#endif
    default_backend = get_storage(default_backend_obj, Backend_program);

    /* NOTE(review): this registers the call-out memory counter, not
     * count_memory_in_compat_cb_boxs -- presumably the latter is hooked
     * up elsewhere; confirm. */
    mem_callback=add_memory_usage_callback(count_memory_in_call_outs,0,0);

    add_object_constant("__backend", default_backend_obj, 0);
    add_program_constant("DefaultBackendClass", default_backend_obj->prog, 0);
  }
}
 
#ifdef DO_PIKE_CLEANUP 
/* Release the default backend at interpreter shutdown.
 * Compiled only when DO_PIKE_CLEANUP is defined. */
void exit_backend(void)
{
  /* Note: The mem_callback has already been freed
   *       by exit_builtin_efuns() at this point.
   */
  /* if (mem_callback) remove_callback(mem_callback); */
  free_object(default_backend_obj);
  /* NOTE(review): default_backend_obj itself is not cleared here --
   * presumably nothing reads it after this point; confirm. */
  default_backend = 0;
  EXIT;
}
 
/* Final cleanup when the last backend object exits.  Note that this might
 * run after exit_backend() if there's garbage keeping backend objects
 * alive.  Frees the poll-device backend registry (if any), all compat
 * callback box blocks, and the global fd -> backend map. */
static void backend_cleanup(void)
{
#ifdef OPEN_POLL_DEVICE
  if (pdb_backends) {
    free(pdb_backends);
    /* Clear the pointer as well as the count: the original left
     * pdb_backends dangling after free, so a later test or a second
     * cleanup pass would touch freed memory / double free. */
    pdb_backends = NULL;
    num_pdb_backends = 0;
  }
#endif /* OPEN_POLL_DEVICE */
  free_all_compat_cb_box_blocks();
  if(fd_map)
  {
    free(fd_map);
    fd_map=0;
    fd_map_size=0;
  }
#ifdef HAVE_BROKEN_F_SETFD
  cleanup_close_on_exec();
#endif /* HAVE_BROKEN_F_SETFD */
}
#endif