// This file is part of Roxen WebServer. 
// Copyright © 1996 - 2009, Roxen IS. 
// $Id: cache.pike,v 1.109 2009/11/18 17:43:43 mast Exp $ 
 
#include <roxen.h> 
#include <config.h> 
 
#ifdef MORE_CACHE_DEBUG 
# define MORE_CACHE_WERR(X...) report_debug("CACHE: "+X) 
# undef CACHE_DEBUG 
# define CACHE_DEBUG 
#else 
# define MORE_CACHE_WERR(X...) 0 
#endif 
 
#ifdef CACHE_DEBUG 
# define CACHE_WERR(X...) report_debug("CACHE: "+X) 
#else 
# define CACHE_WERR(X...) 0 
#endif 
 
#ifdef NEW_RAM_CACHE 
 
// FIXME: Statistics from the gc for invalid cache entry ratio. 
 
constant startup_cache_size = 1024 * 1024; 
// Cache size per manager to use before we've read the config setting. 
 
class CacheEntry (mixed key, mixed data) 
//! Base class for cache entries. 
{ 
  // FIXME: Consider unifying this with CacheKey. But in that case we 
  // need to ensure "interpreter lock" atomicity below. 
 
  int size; 
  //! The size of this cache entry, as measured by @[Pike.count_memory]. 
 
#ifdef DEBUG_CACHE_SIZES 
  int cmp_size; 
  // Size without counting strings. Used to compare the size between 
  // cache_set and cache_clean. Strings are excluded since they might 
  // get or lose unrelated refs in the time between which would make 
  // the comparison unreliable. This might make us miss significant 
  // strings though, but it's hard to get around it. 
#endif 
 
  int timeout; 
  //! Unix time when the entry times out, or zero if there's no 
  //! timeout. 
 
  //! @decl int|float cost; 
  //! 
  //! The creation cost for the entry, according to the metric used by 
  //! the cache manager (provided it implements cost). 
 
  protected string format_key() 
  { 
    if (stringp (key)) { 
      if (sizeof (key) > 40) 
        return sprintf ("%q...", key[..39 - sizeof ("...")]); 
      else 
        return sprintf ("%q", key); 
    } 
    else if (objectp (key)) 
      return sprintf ("%O", key); 
    else 
      return sprintf ("%t", key); 
  } 
 
  protected string _sprintf (int flag) 
  { 
    return flag == 'O' && sprintf ("CacheEntry(%s, %db)", format_key(), size); 
  } 
} 
 
class CacheStats 
//! Holds statistics for each named cache. 
{ 
  int count; 
  //! The number of entries in the cache. 
 
  int size; 
  //! The sum of @[CacheEntry.size] for all cache entries in the cache. 
 
  int hits, misses; 
  //! Plain counts of cache hits and misses. 
 
#ifdef RAMCACHE_STATS 
  int byte_hits, byte_misses; 
  //! Byte hit and miss counts. Note that @[byte_misses] is determined
  //! when a new entry is added - it does not include misses for which
  //! no new entry was created.
 
  int|float cost_hits, cost_misses; 
  //! Hit and miss counts according to the cache manager cost metric.
  //! Note that @[cost_misses] is determined when a new entry is added
  //! - it does not include misses for which no new entry was created.
#endif 
 
  protected string _sprintf (int flag) 
  { 
    return flag == 'O' && sprintf ("CacheStats(%d, %dk)", count, size / 1024); 
  } 
} 
 
class CacheManager 
//! A cache manager handles one or more caches, applying the same 
//! eviction policy and the same size limit on all of them. I.e. it's 
//! practically one cache, and the named caches inside only act as 
//! separate name spaces. 
{ 
  //! @decl constant string name; 
  //! 
  //! A unique name to identify the manager. It is also used as 
  //! display name. 
 
  //! @decl constant string doc; 
  //! 
  //! A description of the manager and its eviction policy in html. 
 
  int total_size_limit; 
  //! Maximum allowed size including the cache manager overhead. 
 
  int size; 
  //! The sum of @[CacheStats.size] for all named caches. 
 
  int size_limit; 
  //! Maximum allowed size for cache entries - @[size] should never be
  //! greater than this. This is a cached value calculated from
  //! @[total_size_limit] on regular intervals. 
 
  mapping(string:mapping(mixed:CacheEntry)) lookup = ([]); 
  //! Lookup mapping of the form @expr{(["cache_name": ([key: data])])@}.
  //! 
  //! If a cache submapping is missing here, that is only due to a
  //! race, so functions in this class should simply ignore that cache.
 
  mapping(string:CacheStats) stats = ([]); 
  //! Statistics for the named caches managed by this object. 
  //! 
  //! If a @[CacheStats] object is missing here, that is only due to a
  //! race, so functions in this class should simply ignore that cache.
 
  //! @decl program CacheEntry; 
  //! 
  //! The manager-specific class to use to create @[CacheEntry] objects. 
 
  void got_miss (string cache_name, mixed key, mapping cache_context); 
  //! Called when @[cache_lookup] records a cache miss. 
 
  protected void account_miss (string cache_name) 
  { 
    if (CacheStats cs = stats[cache_name]) 
      cs->misses++; 
  } 
 
  void got_hit (string cache_name, CacheEntry entry, mapping cache_context); 
  //! Called when @[cache_lookup] records a cache hit. 
 
  protected void account_hit (string cache_name, CacheEntry entry) 
  { 
    if (CacheStats cs = stats[cache_name]) { 
      cs->hits++; 
#ifdef RAMCACHE_STATS 
      cs->byte_hits += entry->size; 
      cs->cost_hits += entry->cost; 
#endif 
    } 
  } 
 
  int add_entry (string cache_name, CacheEntry entry, 
                 int old_entry, mapping cache_context); 
  //! Called to add an entry to the cache. Should also evict entries 
  //! as necessary to keep @expr{@[size] <= @[size_limit]@}. 
  //! 
  //! If @[old_entry] is set then the entry hasn't been created from 
  //! scratch, e.g. there is no prior @[got_miss] call. Returns 1 if 
  //! the entry got added to the cache. Returns 0 if the function 
  //! chose to evict it immediately or if the cache has disappeared. 
 
  private void account_remove_entry (string cache_name, CacheEntry entry) 
  { 
    if (CacheStats cs = stats[cache_name]) { 
      cs->count--; 
      cs->size -= entry->size; 
      ASSERT_IF_DEBUG (cs->size /*%O*/ >= 0, cs->size); 
      ASSERT_IF_DEBUG (cs->count /*%O*/ >= 0, cs->count); 
    } 
    size -= entry->size; 
    ASSERT_IF_DEBUG (size /*%O*/ >= 0, size); 
  } 
 
  protected int low_add_entry (string cache_name, CacheEntry entry) 
  { 
    ASSERT_IF_DEBUG (entry->size /*%O*/, entry->size); 
 
    if (CacheStats cs = stats[cache_name]) { 
#ifdef RAMCACHE_STATS 
      // Assume that the addition of the new entry came about due to a 
      // cache miss. 
      cs->byte_misses += entry->size; 
      cs->cost_misses += entry->cost; 
#endif 
 
      if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) { 
        // vvv Relying on the interpreter lock from here. 
        CacheEntry old_entry = lm[entry->key]; 
        lm[entry->key] = entry; 
        // ^^^ Relying on the interpreter lock to here. 
 
        if (old_entry) { 
          account_remove_entry (cache_name, old_entry); 
          remove_entry (cache_name, old_entry); 
        } 
 
        cs->count++; 
        cs->size += entry->size; 
        size += entry->size; 
 
        if (!(cs->misses & 0x3fff)) // = 16383 
          // Approximate the number of misses as the number of new entries 
          // added to the cache. That should be a suitable unit to use for the 
          // update interval since the manager overhead should be linear to 
          // the number of cached entries. 
          update_size_limit(); 
      } 
 
      return 1; 
    } 
 
    return 0; 
  } 
 
  int remove_entry (string cache_name, CacheEntry entry); 
  //! Called to delete an entry from the cache. Should use 
  //! @[low_remove_entry] to do the atomic removal. Must ensure the 
  //! entry is removed from any extra data structures, regardless 
  //! whether it's already gone from the @[lookup] mapping or not. 
  //! Returns the return value from @[low_remove_entry]. 
 
  protected int low_remove_entry (string cache_name, CacheEntry entry) 
  //! Returns 1 if the entry got removed from the cache, or 0 if it 
  //! wasn't found in the cache or if the cache has disappeared. 
  { 
    if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) 
      if (lm[entry->key] == entry) { 
        // Relying on the interpreter lock here.
        m_delete (lm, entry->key); 
        account_remove_entry (cache_name, entry); 
        return 1; 
      } 
    return 0; 
  } 
 
  void evict (int max_size); 
  //! Called to evict entries until @expr{@[size] <= @[max_size]@}. 
 
  void after_gc() {} 
  //! Called from the periodic GC, after stale and invalid entries 
  //! have been removed from the cache. 
 
  int manager_size_overhead() 
  //! Returns the size consumed by the manager itself, excluding the 
  //! cache entries. 
  { 
    return (Pike.count_memory (-1, this) + 
            Pike.count_memory (0, stats) + 
            Pike.count_memory ((["block_objects": 1]), lookup)); 
  } 
 
  void update_size_limit() 
  { 
    MORE_CACHE_WERR ("%O: update_size_limit\n", this); 
    int mgr_oh = manager_size_overhead(); 
    size_limit = max (0, total_size_limit - mgr_oh); 
    if (size > size_limit) { 
      CACHE_WERR ("%O: Evicting %db " 
                  "(entry size limit %db, manager overhead %db, total %db)\n", 
                  this, size - size_limit, 
                  size_limit, mgr_oh, total_size_limit); 
      evict (size_limit); 
    } 
  } 
 
  string format_cost (int|float cost) {return "-";} 
  //! Function to format a cost measurement for display in the status 
  //! page. 
 
  protected void create (int total_size_limit) 
  { 
    this_program::total_size_limit = total_size_limit; 
    update_size_limit(); 
  } 
 
  protected string _sprintf (int flag) 
  { 
    return flag == 'O' && 
      sprintf ("CacheManager(%s: %dk/%dk)", 
               this->name || "-", size / 1024, size_limit / 1024); 
  } 
} 
 
#if 0 
class CM_Random 
{ 
  inherit CacheManager; 
 
  constant name = "Random"; 
  constant doc = #"\ 
This is a very simple cache manager that just evicts entries from the 
cache at random. The only upside with it is that the cache management 
overhead is minimal."; 
 
  // Workaround since "constant CacheEntry = global::CacheEntry;"
  // currently causes segfault in 7.8.
  class CacheEntry {
    inherit global::CacheEntry;
  }
 
  void got_miss (string cache_name, mixed key, mapping cache_context) 
  { 
    account_miss (cache_name); 
  } 
 
  void got_hit (string cache_name, CacheEntry entry, mapping cache_context) 
  { 
    account_hit (cache_name, entry); 
  } 
 
  int add_entry (string cache_name, CacheEntry entry, 
                 int old_entry, mapping cache_context) 
  { 
    int res = low_add_entry (cache_name, entry); 
    if (size > size_limit) evict (size_limit); 
    return res; 
  } 
 
  int remove_entry (string cache_name, CacheEntry entry) 
  { 
    return low_remove_entry (cache_name, entry); 
  } 
 
  void evict (int max_size) 
  { 
    while (size > max_size) { 
      if (!sizeof (lookup)) break; 
      // Relying on the interpreter lock here. 
      string cache_name = random (lookup)[0]; 
 
      if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) { 
        if (sizeof (lm)) { 
          // Relying on the interpreter lock here. 
          CacheEntry entry = random (lm)[1]; 
          MORE_CACHE_WERR ("%s: Size is %d - evicting %O.\n", 
                           cache_name, size, entry); 
          low_remove_entry (cache_name, entry); 
        } 
        else 
          m_delete (lookup, cache_name); 
      } 
    } 
  } 
} 
 
protected CM_Random cm_random = CM_Random (startup_cache_size); 
#endif 
 
class CM_GreedyDual 
//! Base class for cache managers that work with some variant of the
//! GreedyDual algorithm (see e.g. Cao and Irani, "Cost-Aware WWW 
//! Proxy Caching Algorithms" in "Proceedings of the 1997 USENIX 
//! Symposium on Internet Technology and Systems"): 
//! 
//! A priority queue is maintained, which contains all entries ordered 
//! according to a priority value. The entry with the lowest priority 
//! is always chosen for eviction. When a new entry p is added, and 
//! each time it is hit afterwards, the priority is set to v(p) + L, 
//! where v(p) is p's value according to some algorithm-specific 
//! definition, and L is the lowest priority value in the cache. This 
//! means that the priority values constantly increase, so that old
//! entries without hits eventually get evicted regardless of their
//! initial v(p). 
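// Illustrative sketch (not part of the original source): a small trace
// of the bookkeeping described above, with made-up values.
//
//   add A (v = 2) to an empty cache        -> pval(A) = 2
//   add B (v = 3); floor L = pval(A) = 2   -> pval(B) = 2 + 3 = 5
//   hit on A;      floor L = pval(A) = 2   -> pval(A) = 2 + 2 = 4
//   eviction picks the lowest pval, i.e. A (4) before B (5).
//
// An entry that never gets hit keeps its old pval, sinks to the bottom
// of priority_list and is evicted first, regardless of its v(p).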
{ 
  inherit CacheManager; 
 
  class CacheEntry 
  { 
    inherit global::CacheEntry; 
 
    int|float value; 
    //! The value of the entry, i.e. v(p) in the class description. 
 
    int|float pval; 
    //! The priority value for the entry, defining its position in 
    //! @[priority_list]. Must not change for an entry that is 
    //! currently a member of @[priority_list]. 
 
    string cache_name; 
    //! Need the cache name to find the entry, since @[priority_list] 
    //! is global. 
 
    protected int `< (CacheEntry other) 
    { 
      return pval < other->pval; 
    } 
 
    protected string _sprintf (int flag) 
    { 
      return flag == 'O' && sprintf ("CacheEntry(%s, %db, %O)", 
                                     format_key(), size, value); 
    } 
  } 
 
  multiset(CacheEntry) priority_list = (<>); 
  //! A list of all entries in priority order, by using the multiset 
  //! builtin sorting through @[CacheEntry.`<]. 
 
  protected int max_used_pval; 
  // Used to detect when the entry pvals have grown so big that a
  // reset is warranted.
  // 
  // For integers, this is the maximum used pval and we reset at the 
  // next gc when it gets over Int.NATIVE_MAX/2 (to have some spare 
  // room for the gc delay). 
  // 
  // For floats, we reset whenever L is so big that less than 8 
  // significant bits remains when v(p) is added to it. In that case 
  // max_used_pval only works as a flag, and we set it to 
  // Int.NATIVE_MAX when that state is reached. 
 
  int|float calc_value (string cache_name, CacheEntry entry, 
                        int old_entry, mapping cache_context); 
  //! Called to calculate the value for @[entry], which gets assigned 
  //! to the @expr{value@} variable. Arguments are the same as to 
  //! @[add_entry]. 
 
  void got_miss (string cache_name, mixed key, mapping cache_context) 
  { 
    account_miss (cache_name); 
  } 
 
  local protected int|float calc_pval (CacheEntry entry) 
  { 
    int|float pval; 
    if (CacheEntry lowest = get_iterator (priority_list)->index()) { 
      int|float l = lowest->pval, v = entry->value; 
      pval = l + v; 
 
      if (floatp (v)) { 
        if (v != 0.0 && v < l * (Float.EPSILON * 0x10)) { 
#ifdef DEBUG 
          if (max_used_pval != Int.NATIVE_MAX)
            werror ("%O: Ran out of significant digits for cache entry %O - "
                    "got min priority %O and entry value %O.\n",
                    this, entry, l, v);
#endif
          // Force a reset of the pvals in the next gc.
          max_used_pval = Int.NATIVE_MAX;
        } 
      } 
      else if (pval > max_used_pval) 
        max_used_pval = pval; 
    } 
    else 
      // Assume entry->value isn't greater than Int.NATIVE_MAX/2 right away. 
      pval = entry->value; 
    return pval; 
  } 
 
  void got_hit (string cache_name, CacheEntry entry, mapping cache_context) 
  { 
    account_hit (cache_name, entry); 
    int|float pval = calc_pval (entry); 
    // vvv Relying on the interpreter lock from here. 
    priority_list[entry] = 0; 
    entry->pval = pval; 
    priority_list[entry] = 1; 
    // ^^^ Relying on the interpreter lock to here. 
  } 
 
  int add_entry (string cache_name, CacheEntry entry, 
                 int old_entry, mapping cache_context) 
  { 
    entry->cache_name = cache_name; 
    int|float v = entry->value = 
      calc_value (cache_name, entry, old_entry, cache_context); 
 
    if (!low_add_entry (cache_name, entry)) return 0; 
 
    entry->pval = calc_pval (entry); 
    priority_list[entry] = 1; 
 
    if (size > size_limit) evict (size_limit); 
    return 1; 
  } 
 
  int remove_entry (string cache_name, CacheEntry entry) 
  { 
    priority_list[entry] = 0; 
    return low_remove_entry (cache_name, entry); 
  } 
 
  void evict (int max_size) 
  { 
    while (size > max_size) { 
      CacheEntry entry = get_iterator (priority_list)->index(); 
      if (!entry) break; 
      MORE_CACHE_WERR ("%s: Size is %d - evicting %O.\n", 
                       entry->cache_name, size, entry); 
      priority_list[entry] = 0; 
      low_remove_entry (entry->cache_name, entry); 
    } 
  } 
 
  void after_gc() 
  { 
    if (max_used_pval > Int.NATIVE_MAX / 2) { 
      // The neat thing to do here is to lower all priority values, 
      // but it has to be done atomically. Since this presumably 
      // happens so seldom we take the easy way and just empty the 
      // caches instead. 
      CACHE_WERR ("%O: Max priority value too large - resetting.\n", this); 
 
      if (Configuration admin_config = roxenp()->get_admin_configuration()) 
        // Log an event, in case it doesn't happen that seldom after all.
        admin_config->log_event ("roxen", "reset-ram-cache", this->name); 
 
      while (sizeof (priority_list)) 
        evict (0); 
      max_used_pval = 0; 
    } 
  } 
} 
 
class CM_GDS_1 
{ 
  inherit CM_GreedyDual; 
 
  constant name = "GDS(1)"; 
  constant doc = #"\ 
This cache manager implements <a 
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> 
with the cost of each entry fixed at 1, which makes it optimize the 
cache hit ratio."; 
 
  float calc_value (string cache_name, CacheEntry entry, 
                    int old_entry, mapping cache_context) 
  { 
    ASSERT_IF_DEBUG (entry->size /*%O*/ > 10, entry->size); 
    return 1.0 / entry->size; 
  } 
} 
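// Illustrative note (not part of the original source): with the cost
// pinned at 1, v(p) = 1/size, so between two otherwise equally "hot"
// entries the larger one (say 1 MB, v ~ 1e-6) sits below the smaller
// one (say 1 kB, v ~ 1e-3) in the priority queue and is evicted first.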
 
protected CM_GDS_1 cm_gds_1 = CM_GDS_1 (startup_cache_size); 
 
protected Thread.Local cache_contexts = Thread.Local(); 
// A thread local mapping to store the timestamp from got_miss so it 
// can be read from the (presumably) following add_entry. 
// 
// In an entry with index 0 in the mapping, the time spent creating 
// cache entries is accumulated. It is used to deduct the time for 
// creating entries in subcaches. 
 
class CM_GDS_Time 
//! Like @[CM_GDS_1] but adds support for calculating entry cost based 
//! on passed time. 
{ 
  inherit CM_GreedyDual; 
 
#ifdef RAMCACHE_STATS 
  class CacheEntry 
  { 
    inherit CM_GreedyDual::CacheEntry; 
    int|float cost; 
 
    protected string _sprintf (int flag) 
    { 
      return flag == 'O' && sprintf ("CacheEntry(%s, %db, %O, %O)", 
                                     format_key(), size, value, cost); 
    } 
  } 
#endif 
 
  protected int gettime_func(); 
  //! Returns the current time for cost calculation. (@[format_cost] 
  //! assumes this is in microseconds.) 
 
  protected void save_start_hrtime (string cache_name, mixed key, 
                                    mapping cache_context) 
  { 
    if (mapping all_ctx = cache_context || cache_contexts->get()) { 
      int start = gettime_func() - all_ctx[0]; 
 
      if (mapping(mixed:int) ctx = all_ctx[cache_name]) { 
#if 0 
        if (!zero_type (ctx[key]))
          // This warning is useful since strictly speaking we don't
          // know which cache_lookup calls to use as start for the
          // time measurement, so the time cost might be bogus. If it
          // isn't the last one then you should probably replace some
          // calls with cache_peek.
          werror ("Warning: Detected repeated missed lookup calls.\n%s\n",
                  describe_backtrace (backtrace()));
#endif
        ctx[key] = start;
      } 
      else 
        all_ctx[cache_name] = ([key: start]); 
    } 
 
    else { 
#ifdef DEBUG 
      werror ("Warning: Got call from %O without cache context mapping.\n%s\n", 
              Thread.this_thread(), describe_backtrace (backtrace())); 
#endif 
    } 
  } 
 
  void got_miss (string cache_name, mixed key, mapping cache_context) 
  { 
    //werror ("Miss.\n%s\n", describe_backtrace (backtrace())); 
    account_miss (cache_name); 
    save_start_hrtime (cache_name, key, cache_context); 
  } 
 
  void got_hit (string cache_name, CacheEntry entry, mapping cache_context) 
  { 
    // It shouldn't be necessary to record the start time for cache 
    // hits, but do it anyway for now since there are caches that on 
    // cache hits extend the entries with more data. 
    account_hit (cache_name, entry); 
    save_start_hrtime (cache_name, entry->key, cache_context); 
  } 
 
  protected int entry_create_hrtime (string cache_name, mixed key, 
                                     mapping cache_context) 
  { 
    if (mapping all_ctx = cache_context || cache_contexts->get()) 
      if (mapping(mixed:int) ctx = all_ctx[cache_name]) { 
        int start = m_delete (ctx, key); 
        if (!zero_type (start)) { 
          int duration = (gettime_func() - all_ctx[0]) - start; 
          ASSERT_IF_DEBUG (duration >= 0); 
          all_ctx[0] += duration; 
          return duration; 
        } 
      } 
#ifdef DEBUG 
    werror ("Warning: No preceding missed lookup for this key - " 
            "cannot determine entry creation time.\n%s\n", 
            describe_backtrace (backtrace())); 
#endif 
    return 0; 
  } 
 
  protected float mean_cost; 
  protected int mean_count = 0; 
  // This is not a real mean value since we (normally) don't keep 
  // track of the cost of each entry. Instead it's a decaying average. 
 
  float calc_value (string cache_name, CacheEntry entry, 
                    int old_entry, mapping cache_context) 
  { 
    if (int hrtime = !old_entry && 
        entry_create_hrtime (cache_name, entry->key, cache_context)) { 
      float cost = (float) hrtime; 
#ifdef RAMCACHE_STATS 
      entry->cost = cost; 
#endif 
 
      if (!mean_count) { 
        mean_cost = cost; 
        mean_count = 1; 
      } 
      else { 
        mean_cost = (mean_count * mean_cost + cost) / (mean_count + 1); 
        if (mean_count < 1000) mean_count++; 
      } 
 
      return cost / entry->size; 
    } 
 
    // Awkward situation: We don't have any cost for this entry. Just 
    // use the mean cost of all entries in the cache, so it at least 
    // isn't way off in either direction. 
    return mean_cost / entry->size; 
  } 
 
  void evict (int max_size) 
  { 
    ::evict (max_size); 
    if (!max_size) mean_count = 0; 
  } 
 
  string format_cost (float cost) 
  { 
    return Roxen.format_hrtime ((int) cost); 
  } 
} 
 
class CM_GDS_CPUTime 
{ 
  inherit CM_GDS_Time; 
 
  constant name = "GDS(cpu time)"; 
  string doc = #"\ 
This cache manager implements <a 
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> 
with the cost of each entry determined by the CPU time it took to 
create it. The CPU time implementation is " + 
    Roxen.html_encode_string (System.CPU_TIME_IMPLEMENTATION) + 
    " which is " + 
    (System.CPU_TIME_IS_THREAD_LOCAL == "yes" ? "thread local" : "not thread local") + 
    " and has a resolution of " + 
    (System.CPU_TIME_RESOLUTION / 1e6) + " ms."; 
 
  protected int gettime_func() 
  { 
    return gethrvtime(); 
  } 
} 
 
protected CM_GDS_CPUTime cm_gds_cputime = CM_GDS_CPUTime (startup_cache_size); 
 
class CM_GDS_RealTime 
{ 
  inherit CM_GDS_Time; 
 
  constant name = "GDS(real time)"; 
  string doc = #"\ 
This cache manager implements <a 
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> 
with the cost of each entry determined by the real (wall) time it took 
to create it. The real time implementation is " + 
    Roxen.html_encode_string (System.REAL_TIME_IMPLEMENTATION) + 
    " which is " + 
    (System.REAL_TIME_IS_MONOTONIC ? "monotonic" : "not monotonic") + 
    " and has a resolution of " + 
    (System.REAL_TIME_RESOLUTION / 1e6) + " ms."; 
 
  protected int gettime_func() 
  { 
    // The real time includes a lot of noise that isn't appropriate 
    // for cache entry cost measurement. Let's compensate for the time 
    // spent in the pike gc, at least. 
    return gethrtime() - Pike.implicit_gc_real_time(); 
  } 
} 
 
protected CM_GDS_RealTime cm_gds_realtime = 
  CM_GDS_RealTime (startup_cache_size); 
 
#ifdef DEBUG_CACHE_SIZES 
protected int cmp_sizeof_cache_entry (string cache_name, CacheEntry entry) 
{ 
  int res; 
  mixed data = entry->data; 
  mapping opts = (["block_strings": 1, 
#if DEBUG_CACHE_SIZES > 1
                   "collect_internals": 1,
#endif
                 ]);
  if (function(int|mapping:int) cm_cb = 
      objectp (data) && data->cache_count_memory) 
    res = cm_cb (opts) + Pike.count_memory (-1, entry, entry->key); 
  else 
    res = Pike.count_memory (opts, entry, entry->key, data); 
#if DEBUG_CACHE_SIZES > 1 
  werror ("Internals counted for %O / %O: ({\n%{%s,\n%}})\n", 
          cache_name, entry, 
          sort (map (opts->collect_internals, 
                     lambda (mixed m) {return sprintf ("%O", m);}))); 
#endif 
  return res; 
} 
#endif 
 
//! The preferred managers according to various caching requirements. 
//! When several apply for a cache, choose the first one in this list. 
//! 
//! @dl 
//! @item "default" 
//!   The default manager for caches that do not specify any 
//!   requirements. 
//! 
//! @item "no_cpu_timings" 
//!   The manager to use for caches where a cache entry is created 
//!   synchronously by one thread, but that thread spends most of its 
//!   time waiting for a result from an external party, meaning that 
//!   consumed cpu time is not an accurate measurement of the cost. 
//! 
//! @item "no_thread_timings" 
//!   The manager to use for caches where a cache entry isn't created 
//!   synchronously by one thread in the span between the 
//!   @[cache_lookup] miss and the following @[cache_set]. 
//! 
//! @item "no_timings" 
//!   The manager to use for caches that do not have a usage pattern 
//!   where it is meaningful to calculate the creation cost from the 
//!   time between a @[cache_lookup] miss to the following 
//!   @[cache_set]. 
//! @enddl 
mapping(string:CacheManager) cache_manager_prefs = ([ 
  "default": (System.CPU_TIME_IS_THREAD_LOCAL != "yes" || 
              System.CPU_TIME_RESOLUTION > 10000 ? 
              // Don't use cpu time if it's too bad. Buglet: We just 
              // assume the real time is better. 
              cm_gds_realtime : 
              cm_gds_cputime), 
  "no_cpu_timings": cm_gds_realtime, 
  "no_thread_timings": cm_gds_realtime, 
  "no_timings": cm_gds_1, 
]); 
 
//! All available cache managers. 
array(CacheManager) cache_managers = 
  Array.uniq (({cache_manager_prefs->default, 
                cache_manager_prefs->no_cpu_timings, 
                cache_manager_prefs->no_thread_timings, 
                cache_manager_prefs->no_timings, 
#if 0 
                cm_random,
#endif
                cm_gds_1,
                cm_gds_realtime,
                cm_gds_cputime,
              }));
 
protected mapping(string:CacheManager) caches = ([]); 
// Maps the named caches to the cache managers that handle them. 
 
protected Thread.Mutex cache_mgmt_mutex = Thread.Mutex(); 
// Locks operations that manipulate named caches, i.e. changes in the 
// caches, CacheManager.stats and CacheManager.lookup mappings. 
 
void set_total_size_limit (int size) 
//! Sets the total size limit available to all caches. 
{ 
  // FIXME: Currently this is applied per cache manager, not globally.
  foreach (cache_managers, CacheManager mgr) { 
    mgr->total_size_limit = size; 
    mgr->update_size_limit(); 
  } 
} 
 
mapping(string:CacheManager) cache_list() 
//! Returns a list of all currently registered caches and their 
//! managers. 
{ 
  return caches + ([]); 
} 
 
CacheManager cache_register (string cache_name, 
                             void|string|CacheManager manager) 
//! Registers a new cache. Returns its @[CacheManager] instance. 
//! 
//! @[manager] can be a specific @[CacheManager] instance to use, a 
//! string that specifies a type of manager (see 
//! @[cache_manager_prefs]), or zero to select the default manager. 
//! 
//! If the cache already exists, its current manager is simply 
//! returned, and @[manager] has no effect. 
//! 
//! Registering a cache is not mandatory before it is used - one will 
//! be created automatically with the default manager otherwise. 
//! Still, it's a good idea so that the cache list in the admin
//! interface gets populated in a timely manner.
{
  Thread.Mutex lock = 
    cache_mgmt_mutex->lock (2); // Called from cache_change_manager too. 
 
  if (CacheManager mgr = caches[cache_name]) 
    return mgr; 
 
  if (!manager) manager = cache_manager_prefs->default; 
  else if (stringp (manager)) { 
    string cache_type = manager; 
    manager = cache_manager_prefs[cache_type]; 
    if (!manager) error ("Unknown cache manager type %O requested.\n", 
                         cache_type); 
  } 
 
  caches[cache_name] = manager; 
  manager->stats[cache_name] = CacheStats(); 
  manager->lookup[cache_name] = ([]); 
  return manager; 
} 
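// Usage sketch (not part of the original source); the cache name below
// is hypothetical:
//
//   // Pick a manager type explicitly (see cache_manager_prefs)...
//   cache_register ("my_module:results", "no_timings");
//   // ...or just take the default manager:
//   cache_register ("my_module:results");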
 
void cache_unregister (string cache_name) 
//! Unregisters the specified cache. This empties the cache and also 
//! removes it from the cache overview in the admin interface. 
{ 
  Thread.Mutex lock = cache_mgmt_mutex->lock(); 
 
  // vvv Relying on the interpreter lock from here. 
  if (CacheManager mgr = m_delete (caches, cache_name)) { 
    mapping(mixed:CacheEntry) lm = m_delete (mgr->lookup, cache_name); 
    CacheStats cs = m_delete (mgr->stats, cache_name); 
    // ^^^ Relying on the interpreter lock to here. 
    mgr->size -= cs->size; 
 
    destruct (lock); 
    foreach (lm;; CacheEntry entry) 
      mgr->remove_entry (cache_name, entry); 
  } 
} 
 
void cache_change_manager (string cache_name, CacheManager manager) 
//! Changes the manager for a cache. All the cache entries are moved 
//! to the new manager, but it might not have adequate information to 
//! give them an accurate cost (typically applies to cost derived from 
//! the creation time). 
{ 
  Thread.Mutex lock = cache_mgmt_mutex->lock(); 
 
  // vvv Relying on the interpreter lock from here. 
  CacheManager old_mgr = m_delete (caches, cache_name); 
  if (old_mgr == manager) 
    caches[cache_name] = manager; 
    // ^^^ Relying on the interpreter lock to here. 
 
  else { 
    mapping(mixed:CacheEntry) old_lm = m_delete (old_mgr->lookup, cache_name); 
    CacheStats old_cs = m_delete (old_mgr->stats, cache_name); 
    // ^^^ Relying on the interpreter lock to here. 
    old_mgr->size -= old_cs->size; 
    cache_register (cache_name, manager); 
 
    // Move over the entries. 
    destruct (lock); 
    int entry_size_diff = (Pike.count_memory (0, manager->CacheEntry (0, 0)) - 
                           Pike.count_memory (0, old_mgr->CacheEntry (0, 0))); 
    foreach (old_lm; mixed key; CacheEntry old_ent) { 
      old_mgr->remove_entry (cache_name, old_ent); 
      CacheEntry new_ent = manager->CacheEntry (key, old_ent->data); 
      new_ent->size = old_ent->size + entry_size_diff; 
      manager->add_entry (cache_name, new_ent, 1, 0); 
    } 
    manager->update_size_limit(); // Evicts superfluous entries if necessary. 
  } 
} 
 
void cache_expire (void|string cache_name) 
//! Expires (i.e. removes) all entries in a named cache, or in all 
//! caches if @[cache_name] is left out. 
{ 
  // Currently not very efficiently implemented, but this function 
  // doesn't have to be quick. 
  foreach (cache_name ? ({cache_name}) : indices (caches), string cn) { 
    CACHE_WERR ("Emptying cache %O.\n", cn); 
    if (CacheManager mgr = caches[cn]) { 
      mgr->evict (0); 
      mgr->update_size_limit(); 
    } 
  } 
} 
 
void flush_memory_cache (void|string cache_name) {cache_expire (cache_name);} 
 
void cache_clear_deltas() 
{ 
  cache_contexts->set (([])); 
} 
 
mixed cache_lookup (string cache_name, mixed key, void|mapping cache_context) 
//! Looks up an entry in a cache. Returns @[UNDEFINED] if not found. 
//! 
//! @[cache_context] is an optional mapping used to pass info between 
//! @[cache_lookup] and @[cache_set], which some cache managers need 
//! to determine the cost of the created entry (the work done between 
//! a failed @[cache_lookup] and the following @[cache_set] with the 
//! same key is assumed to be the creation of the cache entry). 
//! 
//! If @[cache_context] is not specified, a thread local mapping is 
//! used. @[cache_context] is necessary when @[cache_lookup] and 
//! @[cache_set] are called from different threads, or in different 
//! callbacks from a backend. It should not be specified otherwise. 
//! 
//! If you need to use @[cache_context], create an empty mapping and 
//! give it to @[cache_lookup]. Then give the same mapping to the 
//! corresponding @[cache_set] when the entry has been created. 
{ 
  CacheManager mgr = caches[cache_name] || cache_register (cache_name); 
 
  if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) 
    if (CacheEntry entry = lm[key]) { 
 
      if (entry->timeout && entry->timeout <= time (1)) { 
        mgr->remove_entry (cache_name, entry); 
        mgr->got_miss (cache_name, key, cache_context); 
        MORE_CACHE_WERR ("cache_lookup (%O, %s): Timed out\n", 
                         cache_name, RXML.utils.format_short (key)); 
        return 0; 
      } 
 
      mgr->got_hit (cache_name, entry, cache_context); 
      MORE_CACHE_WERR ("cache_lookup (%O, %s): Hit\n", 
                       cache_name, RXML.utils.format_short (key)); 
      return entry->data; 
    } 
 
  mgr->got_miss (cache_name, key, cache_context); 
  MORE_CACHE_WERR ("cache_lookup (%O, %s): Miss\n", 
                   cache_name, RXML.utils.format_short (key)); 
  return 0; 
} 
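// Usage sketch (not part of the original source): the usual pattern is
// a lookup followed by a cache_set with the same key on a miss. The
// cache name, key and render_page() helper below are hypothetical.
//
//   mixed res = cache_lookup ("my_module:results", key);
//   if (!res) {
//     res = render_page (key);                        // presumed expensive
//     cache_set ("my_module:results", key, res, 300); // 5 minute timeout
//   }
//
// If the miss and the cache_set happen in different threads or backend
// callbacks, create an empty mapping and pass it as cache_context to
// both calls, as described in the documentation above.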
 
mixed cache_peek (string cache_name, mixed key) 
//! Checks if the cache contains an entry. Same as @[cache_lookup] 
//! except that it doesn't affect the hit/miss statistics or the time 
//! accounting used to estimate entry creation cost. 
{ 
  if (CacheManager mgr = caches[cache_name]) 
    if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) 
      if (CacheEntry entry = lm[key]) { 
 
        if (entry->timeout && entry->timeout <= time (1)) { 
          mgr->remove_entry (cache_name, entry); 
          MORE_CACHE_WERR ("cache_peek (%O, %s): Timed out\n", 
                           cache_name, RXML.utils.format_short (key)); 
          return 0; 
        } 
 
        MORE_CACHE_WERR ("cache_peek (%O, %s): Entry found\n", 
                         cache_name, RXML.utils.format_short (key)); 
        return entry->data; 
      } 
 
  MORE_CACHE_WERR ("cache_peek (%O, %s): Entry not found\n", 
                   cache_name, RXML.utils.format_short (key)); 
  return 0; 
} 
 
mixed cache_set (string cache_name, mixed key, mixed data, void|int timeout, 
                 void|mapping cache_context) 
//! Adds an entry to a cache. 
//! 
//! @param cache_name 
//! The name of the cache. The cache has preferably been created with 
//! @[cache_register], but otherwise it is created on-demand using the 
//! default cache manager. 
//! 
//! @param key 
//! The key for the cache entry. Normally a string, but can be 
//! anything that works as an index in a mapping. 
//! 
//! @param data 
//! The payload data. This cannot be zero, since the cache garb will 
//! consider that a destructed object and evict it from the cache. 
//! 
//! @param timeout 
//! If nonzero, sets the maximum time in seconds that the entry is 
//! valid. 
//! 
//! @param cache_context 
//! The cache context mapping given to the earlier @[cache_lookup] 
//! which failed to find the entry that this call adds to the cache. 
//! See @[cache_lookup] for more details. 
//! 
//! @returns 
//! Returns @[data]. 
{ 
  ASSERT_IF_DEBUG (data); 
 
  CacheManager mgr = caches[cache_name] || cache_register (cache_name); 
  CacheEntry new_entry = mgr->CacheEntry (key, data); 
 
#ifdef DEBUG_COUNT_MEM 
  mapping opts = (["lookahead": DEBUG_COUNT_MEM - 1, 
                   "collect_stats": 1, 
                   "collect_direct_externals": 1, 
                 ]); 
  float t = gauge { 
#else 
#define opts 0 
#endif 
 
      if (function(int|mapping:int) cm_cb = 
          objectp (data) && data->cache_count_memory) 
        new_entry->size = cm_cb (opts) + Pike.count_memory (-1, new_entry, key); 
      else 
        new_entry->size = Pike.count_memory (opts, new_entry, key, data); 
 
#ifdef DEBUG_COUNT_MEM 
    }; 
  werror ("%O: la %d size %d time %g int %d cyc %d ext %d vis %d revis %d " 
          "rnd %d wqa %d\n", 
          new_entry, opts->lookahead, opts->size, t, opts->internal, opts->cyclic,
          opts->external, opts->visits, opts->revisits, opts->rounds, 
          opts->work_queue_alloc); 
 
#if 0 
  if (opts->external) { 
    opts->collect_direct_externals = 1; 
    // Raise the lookahead to 1 to recurse the closest externals. 
    if (opts->lookahead < 1) opts->lookahead = 1; 
 
    if (function(int|mapping:int) cm_cb =
        objectp (data) && data->cache_count_memory)
      new_entry->size = cm_cb (opts) + Pike.count_memory (-1, new_entry, key);
    else
      new_entry->size = Pike.count_memory (opts, new_entry, key, data);
 
    array exts = opts->collect_direct_externals; 
    werror ("Externals found using lookahead %d: %O\n", 
            opts->lookahead, exts); 
#if 0 
    foreach (exts, mixed ext) 
      if (objectp (ext) && ext->locate_my_ext_refs) { 
        werror ("Refs to %O:\n", ext); 
        _locate_references (ext); 
      } 
#endif 
  } 
#endif 
 
#endif        // DEBUG_COUNT_MEM 
#undef opts 
 
#ifdef DEBUG_CACHE_SIZES 
  new_entry->cmp_size = cmp_sizeof_cache_entry (cache_name, new_entry); 
#endif 
 
  if (timeout) 
    new_entry->timeout = time (1) + timeout; 
 
  mgr->add_entry (cache_name, new_entry, 0, cache_context); 
 
  MORE_CACHE_WERR ("cache_set (%O, %s, %s, %O): %O\n", 
                   cache_name, RXML.utils.format_short (key), 
                   sprintf (objectp (data) ? "%O" : "%t", data), timeout, 
                   new_entry); 
 
  return data; 
} 
 
void cache_remove (string cache_name, mixed key) 
//! Removes an entry from the cache. 
//! 
//! @note 
//! If @[key] was zero, this function used to remove the whole cache. 
//! Use @[cache_expire] for that instead. 
{ 
  MORE_CACHE_WERR ("cache_remove (%O, %O)\n", cache_name, key); 
  if (CacheManager mgr = caches[cache_name]) 
    if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) 
      if (CacheEntry entry = lm[key]) 
        mgr->remove_entry (cache_name, entry); 
} 
 
mapping(mixed:CacheEntry) cache_entries (string cache_name) 
//! Returns the lookup mapping for the given named cache. Don't be 
//! destructive on the returned mapping or anything inside it. 
{ 
  if (CacheManager mgr = caches[cache_name]) 
    if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) 
      return lm; 
  return ([]); 
} 
 
array cache_indices(string|void cache_name) 
// Deprecated compat function. 
{ 
  if (!cache_name) 
    return indices (caches); 
  else 
    return indices (cache_entries (cache_name)); 
} 
 
mapping(CacheManager:mapping(string:CacheStats)) cache_stats() 
//! Returns the complete cache statistics. For each cache manager, a 
//! mapping with the named caches it handles is returned, with their 
//! respective @[CacheStats] objects. Don't be destructive on any part 
//! of the returned value. 
{ 
  mapping(CacheManager:mapping(string:CacheStats)) res = ([]); 
  foreach (cache_managers, CacheManager mgr) 
    res[mgr] = mgr->stats; 
  return res; 
} 
 
// GC statistics. These are decaying sums over the last 
// gc_stats_period seconds. 
constant gc_stats_period = 60 * 60; 
float sum_gc_runs = 0.0, sum_gc_time = 0.0; 
float sum_destruct_garbage_size = 0.0; 
float sum_timeout_garbage_size = 0.0; 
 
protected int cache_start_time = time(); 
int last_gc_run; 
 
protected void cache_clean() 
// Periodic gc, to clean up timed out and destructed entries. 
{ 
  int now = time (1); 
  int vt = gethrvtime(), t = gethrtime(); 
  int destr_garb_size, timeout_garb_size; 
 
  CACHE_WERR ("Starting RAM cache cleanup.\n"); 
 
  // Note: Might be necessary to always recheck the sizes here, since 
  // entries can change in size for a number of reasons. Most of the 
  // time it doesn't matter much, but the risk is that the size limit 
  // gets unacceptably off after a while. 
 
  foreach (caches; string cache_name; CacheManager mgr) { 
    if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) 
      foreach (lm;; CacheEntry entry) { 
        if (!entry->data) { 
          MORE_CACHE_WERR ("%s: Removing destructed entry %O\n", 
                           cache_name, entry); 
          destr_garb_size += entry->size; 
          mgr->remove_entry (cache_name, entry); 
        } 
 
        else if (entry->timeout && entry->timeout <= now) { 
          MORE_CACHE_WERR ("%s: Removing timed out entry %O\n", 
                           cache_name, entry); 
          timeout_garb_size += entry->size; 
          mgr->remove_entry (cache_name, entry); 
        } 
 
        else { 
#ifdef DEBUG_CACHE_SIZES 
          int size = cmp_sizeof_cache_entry (cache_name, entry);
          if (size != entry->cmp_size) {
            werror ("Size difference for %O / %O: "
                    "Is %d, was %d in cache_set() - diff %d.\n",
                    cache_name, entry, size, entry->cmp_size,
                    size - entry->cmp_size);
            // Update to avoid repeated messages.
            entry->cmp_size = size;
          }
#endif
        }
      } 
  } 
 
  foreach (cache_managers, CacheManager mgr) 
    mgr->after_gc(); 
 
  vt = gethrvtime() - vt;       // -1 - -1 if cpu time isn't working. 
  t = gethrtime() - t; 
  CACHE_WERR ("Finished RAM cache cleanup - took %s.\n", 
              Roxen.format_hrtime (vt || t)); 
 
  int stat_last_period = now - last_gc_run; 
  int stat_tot_period = now - cache_start_time; 
  int startup = stat_tot_period < gc_stats_period; 
  if (!startup) stat_tot_period = gc_stats_period; 
 
  if (stat_last_period > stat_tot_period) { 
    // GC intervals are larger than the statistics interval, so just 
    // set the values. Note that stat_last_period is very large on the 
    // first call since last_gc_run is zero, so we always get here then. 
    sum_gc_time = (float) (vt || t); 
    sum_gc_runs = 1.0; 
    sum_destruct_garbage_size = (float) destr_garb_size; 
    sum_timeout_garbage_size = (float) timeout_garb_size; 
  } 
 
  else if (startup) { 
    sum_gc_time += (float) (vt || t); 
    sum_gc_runs += 1.0; 
    sum_destruct_garbage_size += (float) destr_garb_size; 
    sum_timeout_garbage_size += (float) timeout_garb_size; 
  } 
 
  else { 
    float weight = 1.0 - (float) stat_last_period / stat_tot_period; 
    sum_gc_runs = weight * sum_gc_runs + 1.0; 
    sum_gc_time = weight * sum_gc_time + (float) (vt || t); 
    sum_destruct_garbage_size = (weight * sum_destruct_garbage_size + 
                                 (float) destr_garb_size); 
    sum_timeout_garbage_size = (weight * sum_timeout_garbage_size + 
                                (float) timeout_garb_size); 
  } 
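  // Illustrative arithmetic for the decay above (example numbers, not
  // from the code): with gc_stats_period = 3600 and a GC interval of
  // 600 seconds, weight becomes 1.0 - 600.0/3600 = 0.83, i.e. each run
  // keeps about 83% of the previous sums and adds the current run's
  // figures on top.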
 
  last_gc_run = now; 
 
  if (Configuration admin_config = roxenp()->get_admin_configuration()) 
    admin_config->log_event ("roxen", "ram-gc", 0, ([ 
                               "handle-cputime": vt, 
                               "handle-time": t, 
                             ])); 
 
  // Fall back to 60 secs just in case the config is messed up somehow. 
  roxenp()->background_run (roxenp()->query ("mem_cache_gc_2") || 60, 
                            cache_clean); 
} 
 
#else  // !NEW_RAM_CACHE 
 
// Base the cache retention time on the time it took to 
// generate the entry. 
/* #define TIME_BASED_CACHE */ 
 
#ifdef TIME_BASED_CACHE 
// A cache entry is an array with six elements 
#define ENTRY_SIZE 6 
#else /* !TIME_BASED_CACHE */ 
// A cache entry is an array with four elements 
#define ENTRY_SIZE 4 
#endif /* TIME_BASED_CACHE */ 
// The elements are as follows: 
// A timestamp when the entry was last used 
#define TIMESTAMP 0 
// The actual data 
#define DATA 1 
// A timeout telling when the data is no longer valid. 
#define TIMEOUT 2 
// The size of the entry, in bytes. 
#define SIZE 3 
#ifdef TIME_BASED_CACHE 
// The approximate time in µs it took to generate the data for the entry. 
#define HRTIME 4 
// The number of hits for this entry. 
#define HITS 5 
#endif /* TIME_BASED_CACHE */ 
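
// For illustration only (made-up values): with TIME_BASED_CACHE off,
// an entry might look like
//   ({ 1100000000 /* TIMESTAMP */, data, 1100000600 /* TIMEOUT */,
//      5324 /* SIZE */ })
// and with it on, two more elements (HRTIME and HITS) are appended.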
 
// The actual cache along with some statistics mappings. 
protected mapping(string:mapping(string:array)) cache; 
protected mapping(string:int) hits=([]), all=([]); 
 
#ifdef TIME_BASED_CACHE 
protected Thread.Local deltas = Thread.Local(); 
#endif /* TIME_BASED_CACHE */ 
 
#ifdef CACHE_DEBUG 
protected array(int) memory_usage_summary() 
{ 
  int count, bytes; 
  foreach (_memory_usage(); string descr; int amount) 
    if (has_prefix (descr, "num_")) count += amount; 
    else if (has_suffix (descr, "_bytes")) bytes += amount; 
  return ({count, bytes}); 
} 
#endif 
 
protected int sizeof_cache_entry (array entry) 
{ 
  int res; 
 
#ifdef DEBUG_COUNT_MEM 
  mapping opts = (["lookahead": DEBUG_COUNT_MEM - 1, 
                   "collect_stats": 1, 
                   "collect_direct_externals": 1, 
                 ]); 
  float t = gauge { 
#else 
#define opts 0 
#endif 
 
      if (function(int|mapping:int) cm_cb = 
          objectp (entry[DATA]) && entry[DATA]->cache_count_memory) 
        res = cm_cb (opts) + Pike.count_memory (-1, entry); 
      else 
        res = Pike.count_memory (opts, entry); 
 
#ifdef DEBUG_COUNT_MEM 
    }; 
  werror ("%s: la %d size %d time %g int %d cyc %d ext %d vis %d revis %d " 
          "rnd %d wqa %d\n", 
          (objectp (entry[DATA]) ? 
           sprintf ("%O", entry[DATA]) : sprintf ("%t", entry[DATA])), 
          opts->lookahead, opts->size, t, opts->internal, opts->cyclic, 
          opts->external, opts->visits, opts->revisits, opts->rounds, 
          opts->work_queue_alloc); 
 
#if 0 
  if (opts->external) { 
    opts->collect_direct_externals = 1; 
    // Raise the lookahead to 1 to recurse the closest externals. 
    if (opts->lookahead < 1) opts->lookahead = 1; 
 
    if (function(int|mapping:int) cm_cb = 
        objectp (entry[DATA]) && entry[DATA]->cache_count_memory) 
      res = cm_cb (opts) + Pike.count_memory (-1, entry); 
    else 
      res = Pike.count_memory (opts, entry); 
 
    array exts = opts->collect_direct_externals; 
    werror ("Externals found using lookahead %d: %O\n", 
            opts->lookahead, exts); 
#if 0 
    foreach (exts, mixed ext) 
      if (objectp (ext) && ext->locate_my_ext_refs) { 
        werror ("Refs to %O:\n", ext); 
        _locate_references (ext); 
      } 
#endif 
  } 
#endif 
#endif      // DEBUG_COUNT_MEM 
#undef opts 
 
  return res; 
} 
 
void flush_memory_cache (void|string in) 
{ 
  CACHE_WERR ("flush_memory_cache(%O)\n", in); 
 
  if (in) { 
    m_delete (cache, in); 
    m_delete (hits, in); 
    m_delete (all, in); 
  } 
 
  else { 
#ifdef CACHE_DEBUG 
    //gc(); 
    [int before_count, int before_bytes] = memory_usage_summary(); 
#endif 
    foreach (cache; string cache_class; mapping(string:array) subcache) { 
#ifdef CACHE_DEBUG 
      int num_entries_before = sizeof (subcache);
#endif 
      m_delete (cache, cache_class); 
      m_delete (hits, cache_class); 
      m_delete (all, cache_class); 
#ifdef CACHE_DEBUG 
      //gc(); 
      [int after_count, int after_bytes] = memory_usage_summary(); 
      CACHE_WERR ("  Flushed %O that had %d entries: " 
                  "Freed %d things and %d bytes\n", 
                  cache_class, num_entries_before, 
                  before_count - after_count, before_bytes - after_bytes); 
      before_count = after_count; 
      before_bytes = after_bytes; 
#endif 
    } 
  } 
 
  CACHE_WERR ("flush_memory_cache() done\n"); 
} 
 
void cache_clear_deltas() 
{ 
#ifdef TIME_BASED_CACHE 
  deltas->set(([])); 
#endif /* TIME_BASED_CACHE */ 
} 
 
constant svalsize = 4*4; 
 
object cache_register (string cache_name, void|string|object manager) 
// Forward compat dummy. 
{ 
  return 0; 
} 
 
// Expire a whole cache 
void cache_expire(string in) 
{ 
  CACHE_WERR("cache_expire(%O)\n", in); 
  m_delete(cache, in); 
} 
 
// Lookup an entry in a cache 
mixed cache_lookup(string in, mixed what, void|mapping ignored) 
{ 
  all[in]++; 
  int t=time(1); 
#ifdef TIME_BASED_CACHE 
  mapping deltas = this_program::deltas->get() || ([]); 
  if (deltas[in]) { 
    deltas[in][what] = gethrtime(); 
  } else { 
    deltas[in] = ([ what : gethrtime() ]); 
  } 
#endif /* TIME_BASED_CACHE */ 
  // Does the entry exist at all? 
  if(array entry = (cache[in] && cache[in][what]) ) 
    // Has it timed out?
    if (entry[TIMEOUT] && entry[TIMEOUT] < t) { 
      m_delete (cache[in], what); 
      MORE_CACHE_WERR("cache_lookup(%O, %O)  ->  Timed out\n", in, what); 
    } 
    else { 
      // Update the timestamp and hits counter and return the value. 
      cache[in][what][TIMESTAMP]=t; 
      MORE_CACHE_WERR("cache_lookup(%O, %O)  ->  Hit\n", in, what); 
      hits[in]++; 
#ifdef TIME_BASED_CACHE 
      entry[HITS]++; 
#endif /* TIME_BASED_CACHE */ 
      return entry[DATA]; 
    } 
  else 
    MORE_CACHE_WERR("cache_lookup(%O, %O)  ->  Miss\n", in, what); 
  return ([])[0]; 
} 
 
mixed cache_peek (string cache_name, mixed key) 
// Forward compat alias. 
{ 
  return cache_lookup (cache_name, key); 
} 
 
// Return all indices used by a given cache or indices of available caches 
array(string) cache_indices(string|void in) 
{ 
  if (in) 
    return (cache[in] && indices(cache[in])) || ({ }); 
  else 
    return indices(cache); 
} 
 
// Return some fancy cache statistics. 
mapping(string:array(int)) status() 
{ 
  mapping(string:array(int)) ret = ([ ]); 
  foreach (cache; string name; mapping(string:array) cache_class) { 
#ifdef DEBUG_COUNT_MEM 
    werror ("\nCache: %s\n", name); 
#endif 
    //  We only show names up to the first ":" if present. This lets us 
    //  group entries together in the status table. 
    string show_name = (name / ":")[0]; 
    int size = 0; 
    foreach (cache_class; string idx; array entry) { 
      if (!entry[SIZE]) 
        entry[SIZE] = Pike.count_memory (0, idx) + sizeof_cache_entry (entry); 
      size += entry[SIZE]; 
    } 
    array(int) entry = ({ sizeof(cache[name]), 
                          hits[name], 
                          all[name], 
                          size }); 
    if (!zero_type(ret[show_name])) 
      for (int idx = 0; idx <= 3; idx++) 
        ret[show_name][idx] += entry[idx]; 
    else 
      ret[show_name] = entry; 
  } 
  return ret; 
} 
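
// For illustration (made-up values), the mapping returned by status()
// above is on the form
//   ([ "mymodule": ({ 17 /* entries */, 4711 /* hits */,
//                     5022 /* lookups */, 91000 /* bytes */ }) ])
// where entries sharing the same prefix before ":" are summed together.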
 
// Remove an entry from the cache. Removes the entire cache if no 
// entry key is given. 
void cache_remove(string in, mixed what) 
{ 
  MORE_CACHE_WERR("cache_remove(%O, %O)\n", in, what); 
  if(!what) 
    m_delete(cache, in); 
  else 
    if(cache[in]) 
      m_delete(cache[in], what); 
} 
 
// Add an entry to a cache 
mixed cache_set(string in, mixed what, mixed to, int|void tm, 
                void|mapping ignored) 
{ 
  MORE_CACHE_WERR("cache_set(%O, %O, %O)\n", in, what, /* to */ _typeof(to)); 
  int t=time(1); 
  if(!cache[in]) 
    cache[in]=([ ]); 
  cache[in][what] = allocate(ENTRY_SIZE); 
  cache[in][what][DATA] = to; 
  if(tm) cache[in][what][TIMEOUT] = t + tm; 
  cache[in][what][TIMESTAMP] = t; 
#ifdef TIME_BASED_CACHE 
  mapping deltas = this_program::deltas->get() || ([]); 
  cache[in][what][HRTIME] = gethrtime() - (deltas[in] && deltas[in][what]); 
  cache[in][what][HITS] = 1; 
  CACHE_WERR("[%O] HRTIME: %d\n", in, cache[in][what][HRTIME]); 
#endif /* TIME_BASED_CACHE */ 
  return to; 
} 
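
// A minimal usage sketch of cache_lookup()/cache_set() above
// (hypothetical cache name, key and helper); note that cache_lookup()
// returns UNDEFINED on a miss, so zero_type() distinguishes a miss
// from a cached zero:
//
//   mixed val = cache_lookup ("mymodule:results", key);
//   if (zero_type (val))
//     val = cache_set ("mymodule:results", key,
//                      compute_result (key), 300);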
 
// Clean the cache. 
void cache_clean() 
{ 
  int gc_time=[int](([function(string:mixed)]roxenp()->query)("mem_cache_gc")); 
  int now=time(1); 
#ifdef CACHE_DEBUG 
  [int mem_count, int mem_bytes] = memory_usage_summary(); 
  CACHE_WERR("cache_clean() [memory usage: %d things, %d bytes]\n", 
             mem_count, mem_bytes); 
#endif 
 
  foreach(cache; string cache_class_name; mapping(string:array) cache_class) 
  { 
#ifdef CACHE_DEBUG 
    int num_entries_before = sizeof (cache_class); 
#endif 
    MORE_CACHE_WERR("  Class %O\n", cache_class_name); 
#ifdef DEBUG_COUNT_MEM 
    werror ("\nCache: %s\n", cache_class_name); 
#endif 
 
    foreach(cache_class; string idx; array entry) 
    { 
#ifdef DEBUG 
      if(!intp(entry[TIMESTAMP])) 
        error("Illegal timestamp in cache ("+cache_class_name+":"+idx+")\n"); 
#endif 
      if(entry[TIMEOUT] && entry[TIMEOUT] < now) { 
        MORE_CACHE_WERR("    %O: Deleted (explicit timeout)\n", idx); 
        m_delete(cache_class, idx); 
      } 
      else { 
#ifdef TIME_BASED_CACHE 
        if (entry[HRTIME] < 10*60*1000000) {  // 10 minutes. 
            // Valid HRTIME entry.
            // Let an entry live for 5000 times the time it took to
            // create it, times the 2-logarithm of the number of hits,
            // but at least one second.
            // 5000/1000000 = 1/200
            // FIXME: Adjust the factor dynamically?
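            // Illustrative arithmetic (made-up numbers): an entry that
            // took 100000 us (0.1 s) to generate, with a hit-count
            // 2-logarithm of 3, gets t = 100000*3/200 + 1 = 1501
            // seconds, i.e. roughly 25 minutes.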
            int t = [int](entry[HRTIME]*(entry[HITS]->size(2)))/200 + 1; 
            if ((entry[TIMESTAMP] + t) < now) 
            { 
              m_delete(cache_class, idx); 
              MORE_CACHE_WERR("    %O with lifetime %d seconds (%d hits): Deleted\n", 
                              idx, t, entry[HITS]); 
            } else { 
              MORE_CACHE_WERR("    %O with lifetime %d seconds (%d hits): Ok\n", 
                              idx, t, entry[HITS]); 
            } 
            continue; 
          } 
#endif /* TIME_BASED_CACHE */ 
 
        if(!entry[SIZE]) 
          entry[SIZE] = Pike.count_memory (0, idx) + sizeof_cache_entry (entry); 
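        // Illustrative arithmetic (made-up numbers): with mem_cache_gc
        // set to 300 seconds and a 10000 byte entry, the test below
        // gives the entry an effective lifetime of 300 - 10000/100 =
        // 200 seconds, so larger entries are expired sooner.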
        if(entry[TIMESTAMP]+1 < now && 
           entry[TIMESTAMP] + gc_time - entry[SIZE] / 100 < now) 
        { 
          m_delete(cache_class, idx); 
          MORE_CACHE_WERR("    %O with size %d bytes: Deleted\n", 
                          idx, [int] entry[SIZE]); 
        } 
        else 
          MORE_CACHE_WERR("    %O with size %d bytes: Ok\n", 
                          idx, [int] entry[SIZE]); 
      } 
    } 
 
    if(!sizeof(cache_class)) 
      m_delete(cache, cache_class_name); 
 
#ifdef CACHE_DEBUG 
    [int new_mem_count, int new_mem_bytes] = memory_usage_summary(); 
    CACHE_WERR("  Class %O: Cleaned up %d of %d entries " 
               "[freed %d things and %d bytes]\n", 
               cache_class_name, 
               num_entries_before - sizeof (cache_class), 
               num_entries_before, 
               mem_count - new_mem_count, 
               mem_bytes - new_mem_bytes); 
    mem_count = new_mem_count; 
    mem_bytes = new_mem_bytes; 
#endif 
  } 
 
  CACHE_WERR("cache_clean() done\n"); 
  roxenp()->background_run (gc_time, cache_clean); 
} 
 
#endif        // !NEW_RAM_CACHE 
 
 
// --- Non-garbing "cache" ----------- 
 
private mapping(string:mapping(string:mixed)) nongc_cache; 
 
//! Associates a @[value] with a @[key] in the cache identified by
//! @[cache_id]. This cache does not garb, so it should only be used
//! for data whose size is well controlled.
void nongarbing_cache_set(string cache_id, string key, mixed value) { 
  if(nongc_cache[cache_id]) 
    nongc_cache[cache_id][key] = value; 
  else 
    nongc_cache[cache_id] = ([ key:value ]); 
} 
 
//! Returns the value associated to the @[key] in the cache 
//! identified by @[cache_id] in the non-garbing cache. 
mixed nongarbing_cache_lookup(string cache_id, string key) { 
  return nongc_cache[cache_id]?nongc_cache[cache_id][key]:([])[0]; 
} 
 
//! Remove a value from the non-garbing cache. 
void nongarbing_cache_remove(string cache_id, string key) { 
  if(nongc_cache[cache_id]) m_delete(nongc_cache[cache_id], key); 
} 
 
//! Flush a cache in the non-garbing cache. 
void nongarbing_cache_flush(string cache_id) { 
  m_delete(nongc_cache, cache_id); 
} 
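
// A minimal usage sketch of the non-garbing cache API above
// (hypothetical cache id and key):
//
//   nongarbing_cache_set ("mymodule:table", "key", value);
//   mixed v = nongarbing_cache_lookup ("mymodule:table", "key");
//   nongarbing_cache_remove ("mymodule:table", "key");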
 
mapping(string:array(int)) ngc_status() { 
  mapping(string:array(int)) res = ([]); 
 
  foreach(nongc_cache; string cache; mapping(string:mixed) cachemap) { 
    int size = Pike.count_memory (0, cachemap); 
    res[cache] = ({ sizeof(cachemap), size}); 
  } 
 
  return res; 
} 
 
 
// --- Session cache ----------------- 
 
#ifndef SESSION_BUCKETS 
# define SESSION_BUCKETS 4 
#endif 
#ifndef SESSION_SHIFT_TIME 
# define SESSION_SHIFT_TIME 15*60 
#endif 
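
// A rough picture of the scheme below: sessions live in SESSION_BUCKETS
// mappings that are rotated every SESSION_SHIFT_TIME seconds, and a
// lookup moves the session back to the newest bucket. With the defaults
// above (4 buckets, 15 minute shifts) a session that is never touched
// thus stays in RAM for roughly 45-60 minutes before it is either
// dropped or written to the database, depending on its persistence.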
 
// The minimum time until which the session should be stored. 
private mapping(string:int) session_persistence; 
// The sessions, divided into several buckets. 
private array(mapping(string:mixed)) session_buckets; 
// The database for storage of the sessions. 
private function(string:Sql.Sql) db; 
// The biggest value in session_persistence 
private int max_persistence; 
 
// The low level call for storing a session in the database 
private void store_session(string id, mixed data, int t) { 
  data = encode_value(data); 
  db("local")->query("REPLACE INTO session_cache VALUES (%s," + t + ",%s)", 
                     id, data); 
} 
 
// GC that, depending on the session's persistence, either throws the
// session away or stores it in the database.
private void session_cache_handler() { 
  int t=time(1); 
  if(max_persistence>t) { 
 
  clean: 
    foreach(indices(session_buckets[-1]), string id) { 
      if(session_persistence[id]<t) { 
        m_delete(session_buckets[-1], id); 
        m_delete(session_persistence, id); 
        continue; 
      } 
      for(int i; i<SESSION_BUCKETS-2; i++) 
        if(session_buckets[i][id]) { 
          continue clean; 
        } 
      if(objectp(session_buckets[-1][id])) { 
        m_delete(session_buckets[-1], id); 
        m_delete(session_persistence, id); 
        continue; 
      } 
      store_session(id, session_buckets[-1][id], session_persistence[id]); 
      m_delete(session_buckets[-1], id); 
      m_delete(session_persistence, id); 
    } 
  } 
 
  session_buckets = ({ ([]) }) + session_buckets[..SESSION_BUCKETS-2]; 
  roxenp()->background_run(SESSION_SHIFT_TIME, session_cache_handler); 
} 
 
// Stores all sessions that should be persistent in the database. 
// This function is called upon exit. 
private void session_cache_destruct() { 
  int t=time(1); 
  if(max_persistence>t) { 
    report_notice("Synchronizing session cache\n");
    foreach(session_buckets, mapping(string:mixed) session_bucket) 
      foreach(indices(session_bucket), string id) 
        if(session_persistence[id]>t) { 
          store_session(id, session_bucket[id], session_persistence[id]); 
          m_delete(session_persistence, id); 
        } 
  } 
  report_notice("Session cache synchronized\n"); 
} 
 
//! Removes the session data associated with @[id] from the
//! session cache and the session database.
//! 
//! @seealso 
//!   set_session_data 
void clear_session(string id) { 
  m_delete(session_persistence, id); 
  foreach(session_buckets, mapping bucket) 
    m_delete(bucket, id); 
  db("local")->query("DELETE FROM session_cache WHERE id=%s", id); 
} 
 
//! Returns the data associated with the session @[id]. 
//! Returns a zero type upon failure. 
//! 
//! @seealso 
//!   set_session_data 
mixed get_session_data(string id) { 
  mixed data; 
  foreach(session_buckets, mapping bucket) 
    if(data=bucket[id]) { 
      session_buckets[0][id] = data; 
      return data; 
    } 
  data = db("local")->query("SELECT data FROM session_cache WHERE id=%s", id); 
  if(sizeof([array]data) && 
     !catch(data=decode_value( ([array(mapping(string:string))]data)[0]->data ))) 
    return data; 
  return ([])[0]; 
} 
 
//! Associates the session @[id] with the @[data]. If no @[id] is
//! provided, a unique id will be generated. The session id is returned
//! from the function. The minimum guaranteed storage time may be set
//! with the @[persistence] argument. Note that this is a time stamp,
//! not a time out.
//! If @[store] is set, the @[data] will be written to the database
//! immediately, instead of only when the session GC flushes it from
//! RAM. This ensures that the data is kept safe in case the server
//! restarts before the next GC.
//! 
//! @note 
//!   The @[data] must not contain any objects, programs or functions,
//!   or the database storage will throw an error.
//! 
//! @seealso 
//!   get_session_data, clear_session 
string set_session_data(mixed data, void|string id, void|int persistence, 
                        void|int(0..1) store) { 
  if(!id) id = ([function(void:string)]roxenp()->create_unique_id)(); 
  session_persistence[id] = persistence; 
  session_buckets[0][id] = data; 
  max_persistence = max(max_persistence, persistence); 
  if(store && persistence) store_session(id, data, persistence); 
  return id; 
} 
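
// A minimal usage sketch of the session API above (hypothetical data);
// the id is generated and the data is kept for at least an hour:
//
//   string sid = set_session_data ((["user": "foo"]), 0,
//                                  time() + 3600, 0);
//   mapping m = get_session_data (sid);
//   clear_session (sid);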
 
// Sets up the session database tables. 
private void setup_tables() { 
  db("local")->query("CREATE TABLE IF NOT EXISTS session_cache (" 
                     "id CHAR(32) NOT NULL PRIMARY KEY, " 
                     "persistence INT UNSIGNED NOT NULL DEFAULT 0, " 
                     "data BLOB NOT NULL)"); 
  master()->resolv("DBManager.is_module_table") 
    ( 0, "local", "session_cache", "Used by the session manager" ); 
} 
 
//! Initializes the session handler. 
void init_session_cache() { 
  db = (([function(string:function(string:object(Sql.Sql)))]master()->resolv) 
        ("DBManager.cached_get")); 
  setup_tables(); 
} 
 
void init_call_outs() 
{ 
  roxenp()->background_run(60, cache_clean); 
  roxenp()->background_run(SESSION_SHIFT_TIME, session_cache_handler); 
 
  CACHE_WERR("Cache garb call outs installed.\n"); 
} 
 
void create() 
{ 
  add_constant( "cache", this_object() ); 
#ifndef NEW_RAM_CACHE 
  cache = ([ ]); 
#endif 
 
  nongc_cache = ([ ]); 
 
  session_buckets = ({ ([]) }) * SESSION_BUCKETS; 
  session_persistence = ([]); 
 
  CACHE_WERR("Now online.\n"); 
} 
 
void destroy() { 
  session_cache_destruct(); 
  return; 
}