GenericFileFormat.cpp
1/*
2 This source file is part of Rigs of Rods
3 Copyright 2022 Petr Ohlidal
4
5 For more information, see http://www.rigsofrods.org/
6
7 Rigs of Rods is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 3, as
9 published by the Free Software Foundation.
10
11 Rigs of Rods is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with Rigs of Rods. If not, see <http://www.gnu.org/licenses/>.
18*/
19
20#include "GenericFileFormat.h"
21
22#include "Application.h"
23#include "Console.h"
24
25#include <algorithm>
26
27using namespace RoR;
28using namespace Ogre;
29
30enum class PartialToken
31{
32 NONE,
33 COMMENT_SEMICOLON, // Comment starting with ';'
34 COMMENT_SLASH, // Comment starting with '//'
35 COMMENT_HASH, // Comment starting with '#'
36 STRING_QUOTED, // String starting/ending with '"'
37 STRING_NAKED, // String without '"' on either end
38 STRING_NAKED_CAPTURING_SPACES, // Only for OPTION_PARENTHESES_CAPTURE_SPACES - A naked string seeking the closing ')'.
39 TITLE_STRING, // A whole-line string, with spaces
40 NUMBER_STUB_MINUS, // Sole '-' character, may start a number or a naked string.
41 NUMBER_INTEGER, // Just digits and optionally leading '-'
42 NUMBER_DECIMAL, // Like INTEGER but already containing '.'
43 NUMBER_SCIENTIFIC_STUB, // Like DECIMAL, already containing 'e' or 'E' but not the exponent value.
44 NUMBER_SCIENTIFIC_STUB_MINUS, // Like SCIENTIFIC_STUB but with only '-' in exponent.
45 NUMBER_SCIENTIFIC, // Valid decimal number in scientific notation.
46 KEYWORD, // Unquoted string at the start of line. Accepted characters: alphanumeric and underscore
47 KEYWORD_BRACED, // Like KEYWORD but starting with '[' and ending with ']'
48 BOOL_TRUE, // Partial 'true'
49 BOOL_FALSE, // Partial 'false'
50 GARBAGE, // Text not fitting any above category, will be discarded
51};
52
53struct DocumentParser
54{
55 DocumentParser(GenericDocument& d, const BitMask_t opt, Ogre::DataStreamPtr ds)
56 : doc(d), options(opt), datastream(ds) {}
57
58 // Config
59 GenericDocument& doc;
60 const BitMask_t options;
61 Ogre::DataStreamPtr datastream;
62
63 // State
64 std::vector<char> tok;
65 size_t line_num = 0;
66 size_t line_pos = 0;
67 PartialToken partial_tok_type = PartialToken::NONE;
68 bool title_found = false; // Only for OPTION_FIRST_LINE_IS_TITLE
69
70 void ProcessChar(const char c);
71 void ProcessEOF();
73
74 void BeginToken(const char c);
75 void UpdateComment(const char c);
76 void UpdateString(const char c);
77 void UpdateNumber(const char c);
78 void UpdateBool(const char c);
79 void UpdateKeyword(const char c);
80 void UpdateTitle(const char c); // Only for OPTION_FIRST_LINE_IS_TITLE
81 void UpdateGarbage(const char c);
82
83 void DiscontinueBool();
84 void DiscontinueNumber();
85 void DiscontinueKeyword();
86 void FlushStringishToken(RoR::TokenType type);
87 void FlushNumericToken();
88};
89
90void DocumentParser::BeginToken(const char c)
91{
92 switch (c)
93 {
94 case '\r':
95 break;
96
97 case ' ':
98 case ',':
99 case '\t':
100 line_pos++;
101 break;
102
103 case ':':
104 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_COLON)
105 {
106 line_pos++;
107 }
108 else
109 {
110 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
111 partial_tok_type = PartialToken::STRING_NAKED;
112 else
113 partial_tok_type = PartialToken::GARBAGE;
114 tok.push_back(c);
115 line_pos++;
116 }
117 break;
118
119 case '=':
120 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_EQUALS)
121 {
122 line_pos++;
123 }
124 else
125 {
126 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
127 partial_tok_type = PartialToken::STRING_NAKED;
128 else
129 partial_tok_type = PartialToken::GARBAGE;
130 tok.push_back(c);
131 line_pos++;
132 }
133 break;
134
135 case '\n':
136 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
137 line_num++;
138 line_pos = 0;
139 break;
140
141 case ';':
142 partial_tok_type = PartialToken::COMMENT_SEMICOLON;
143 line_pos++;
144 break;
145
146 case '/':
147 if (options & GenericDocument::OPTION_ALLOW_SLASH_COMMENTS)
148 {
149 partial_tok_type = PartialToken::COMMENT_SLASH;
150 }
151 else
152 {
153 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
154 partial_tok_type = PartialToken::STRING_NAKED;
155 else
156 partial_tok_type = PartialToken::GARBAGE;
157 tok.push_back(c);
158 }
159 line_pos++;
160 break;
161
162 case '#':
163 if (options & GenericDocument::OPTION_ALLOW_HASH_COMMENTS)
164 {
165 partial_tok_type = PartialToken::COMMENT_HASH;
166 }
167 else
168 {
169 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
170 partial_tok_type = PartialToken::STRING_NAKED;
171 else
172 partial_tok_type = PartialToken::GARBAGE;
173 tok.push_back(c);
174 }
175 line_pos++;
176 break;
177
178 case '[':
179 if (options & GenericDocument::OPTION_ALLOW_BRACED_KEYWORDS)
180 {
181 partial_tok_type = PartialToken::KEYWORD_BRACED;
182 }
183 else
184 {
185 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
186 partial_tok_type = PartialToken::STRING_NAKED;
187 else
188 partial_tok_type = PartialToken::GARBAGE;
189 }
190 tok.push_back(c);
191 line_pos++;
192 break;
193
194 case '"':
195 partial_tok_type = PartialToken::STRING_QUOTED;
196 line_pos++;
197 break;
198
199 case '.':
200 tok.push_back(c);
201 partial_tok_type = PartialToken::NUMBER_DECIMAL;
202 line_pos++;
203 break;
204
205 case 't':
206 tok.push_back(c);
207 partial_tok_type = PartialToken::BOOL_TRUE;
208 line_pos++;
209 break;
210
211 case 'f':
212 tok.push_back(c);
213 partial_tok_type = PartialToken::BOOL_FALSE;
214 line_pos++;
215 break;
216
217 case '0':
218 case '1':
219 case '2':
220 case '3':
221 case '4':
222 case '5':
223 case '6':
224 case '7':
225 case '8':
226 case '9':
227 partial_tok_type = PartialToken::NUMBER_INTEGER;
228 tok.push_back(c);
229 line_pos++;
230 break;
231
232 case '-':
233 partial_tok_type = PartialToken::NUMBER_STUB_MINUS;
234 tok.push_back(c);
235 line_pos++;
236 break;
237
238 default:
239 if (isalpha(c) &&
240 (doc.tokens.size() == 0 || doc.tokens.back().type == TokenType::LINEBREAK)) // on line start?
241 {
242 tok.push_back(c);
243 partial_tok_type = PartialToken::KEYWORD;
244 }
245 else if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
246 {
247 tok.push_back(c);
248 partial_tok_type = PartialToken::STRING_NAKED;
249 }
250 else
251 {
252 partial_tok_type = PartialToken::GARBAGE;
253 tok.push_back(c);
254 }
255 line_pos++;
256 break;
257 }
258
259 if (options & GenericDocument::OPTION_FIRST_LINE_IS_TITLE
260 && !title_found
261 && (doc.tokens.size() == 0 || doc.tokens.back().type == TokenType::LINEBREAK)
262 && partial_tok_type != PartialToken::NONE
263 && partial_tok_type != PartialToken::COMMENT_SEMICOLON
264 && partial_tok_type != PartialToken::COMMENT_SLASH)
265 {
266 title_found = true;
267 partial_tok_type = PartialToken::TITLE_STRING;
268 }
269
270 if (partial_tok_type == PartialToken::GARBAGE)
271 {
272 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
273 fmt::format("{}, line {}, pos {}: stray character '{}'", datastream->getName(), line_num, line_pos, c));
274 }
275}
276
277void DocumentParser::UpdateComment(const char c)
278{
279 switch (c)
280 {
281 case '\r':
282 break;
283
284 case '\n':
285 this->FlushStringishToken(TokenType::COMMENT);
286 // Break line
287 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
288 line_num++;
289 line_pos = 0;
290 break;
291
292 case '/':
293 if (partial_tok_type != PartialToken::COMMENT_SLASH || tok.size() > 0) // With COMMENT_SLASH, skip any number of leading '/'
294 {
295 tok.push_back(c);
296 }
297 line_pos++;
298 break;
299
300 default:
301 tok.push_back(c);
302 line_pos++;
303 break;
304 }
305}
306
307void DocumentParser::UpdateString(const char c)
308{
309 switch (c)
310 {
311 case '\r':
312 break;
313
314 case ' ':
315 if (partial_tok_type == PartialToken::STRING_QUOTED
316 || partial_tok_type == PartialToken::STRING_NAKED_CAPTURING_SPACES)
317 {
318 tok.push_back(c);
319 }
320 else // (partial_tok_type == PartialToken::STRING_NAKED)
321 {
322 this->FlushStringishToken(TokenType::STRING);
323 }
324 line_pos++;
325 break;
326
327 case ',':
328 case '\t':
329 if (partial_tok_type == PartialToken::STRING_QUOTED)
330 {
331 tok.push_back(c);
332 }
333 else // (partial_tok_type == PartialToken::STRING_NAKED)
334 {
335 this->FlushStringishToken(TokenType::STRING);
336 }
337 line_pos++;
338 break;
339
340 case '\n':
341 if (partial_tok_type == PartialToken::STRING_QUOTED)
342 {
343 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
344 fmt::format("{}, line {}, pos {}: quoted string interrupted by newline", datastream->getName(), line_num, line_pos));
345 }
346 this->FlushStringishToken(TokenType::STRING);
347 // Break line
348 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
349 line_num++;
350 line_pos = 0;
351 break;
352
353 case ':':
354 if (partial_tok_type == PartialToken::STRING_NAKED
355 && options & GenericDocument::OPTION_ALLOW_SEPARATOR_COLON)
356 {
357 this->FlushStringishToken(TokenType::STRING);
358 }
359 else
360 {
361 tok.push_back(c);
362 }
363 line_pos++;
364 break;
365
366 case '=':
367 if (partial_tok_type == PartialToken::STRING_NAKED
368 && options & GenericDocument::OPTION_ALLOW_SEPARATOR_EQUALS)
369 {
370 this->FlushStringishToken(TokenType::STRING);
371 }
372 else
373 {
374 tok.push_back(c);
375 }
376 line_pos++;
377 break;
378
379 case '"':
380 if (partial_tok_type == PartialToken::STRING_QUOTED)
381 {
382 this->FlushStringishToken(TokenType::STRING);
383 }
384 else // (partial_tok_type == PartialToken::STRING_NAKED)
385 {
386 partial_tok_type = PartialToken::GARBAGE;
387 tok.push_back(c);
388 }
389 line_pos++;
390 break;
391
392 case '(':
393 if (partial_tok_type == PartialToken::STRING_NAKED
394 && options & GenericDocument::OPTION_PARENTHESES_CAPTURE_SPACES)
395 {
396 partial_tok_type = PartialToken::STRING_NAKED_CAPTURING_SPACES;
397 }
398 tok.push_back(c);
399 line_pos++;
400 break;
401
402 case ')':
403 if (partial_tok_type == PartialToken::STRING_NAKED_CAPTURING_SPACES)
404 {
405 partial_tok_type = PartialToken::STRING_NAKED;
406 }
407 tok.push_back(c);
408 line_pos++;
409 break;
410
411 default:
412 tok.push_back(c);
413 line_pos++;
414 break;
415 }
416
417 if (partial_tok_type == PartialToken::GARBAGE)
418 {
419 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
420 fmt::format("{}, line {}, pos {}: stray character '{}' in string", datastream->getName(), line_num, line_pos, c));
421 }
422}
423
424void DocumentParser::UpdateNumber(const char c)
425{
426 switch (c)
427 {
428 case '\r':
429 break;
430
431 case ' ':
432 case ',':
433 case '\t':
434 if (partial_tok_type == PartialToken::NUMBER_STUB_MINUS
435 && options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
436 {
437 this->FlushStringishToken(TokenType::STRING);
438 }
439 else
440 {
441 this->FlushNumericToken();
442 }
443 line_pos++;
444 break;
445
446 case '\n':
447 if (partial_tok_type == PartialToken::NUMBER_STUB_MINUS
448 && options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
449 {
450 this->FlushStringishToken(TokenType::STRING);
451 }
452 else
453 {
454 this->FlushNumericToken();
455 }
456 // Break line
457 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
458 line_num++;
459 line_pos = 0;
460 break;
461
462 case ':':
463 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_COLON)
464 {
465 if (partial_tok_type == PartialToken::NUMBER_STUB_MINUS
466 && options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
467 {
468 this->FlushStringishToken(TokenType::STRING);
469 }
470 else
471 {
472 this->FlushNumericToken();
473 }
474 }
475 else
476 {
477 this->DiscontinueNumber();
478 tok.push_back(c);
479 }
480 line_pos++;
481 break;
482
483 case '=':
484 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_EQUALS)
485 {
486 if (partial_tok_type == PartialToken::NUMBER_STUB_MINUS
487 && options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
488 {
489 this->FlushStringishToken(TokenType::STRING);
490 }
491 else
492 {
493 this->FlushNumericToken();
494 }
495 }
496 else
497 {
498 this->DiscontinueNumber();
499 tok.push_back(c);
500 }
501 line_pos++;
502 break;
503
504 case '.':
505 if (partial_tok_type == PartialToken::NUMBER_INTEGER
506 || partial_tok_type == PartialToken::NUMBER_STUB_MINUS)
507 {
508 partial_tok_type = PartialToken::NUMBER_DECIMAL;
509 }
510 else
511 {
512 this->DiscontinueNumber();
513 }
514 tok.push_back(c);
515 line_pos++;
516 break;
517
518 case 'e':
519 case 'E':
520 if (partial_tok_type == PartialToken::NUMBER_INTEGER
521 || partial_tok_type == PartialToken::NUMBER_DECIMAL)
522 {
523 partial_tok_type = PartialToken::NUMBER_SCIENTIFIC_STUB;
524 }
525 else
526 {
527 this->DiscontinueNumber();
528 }
529 tok.push_back(c);
530 line_pos++;
531 break;
532
533 case '-':
534 if (partial_tok_type == PartialToken::NUMBER_SCIENTIFIC_STUB)
535 {
536 partial_tok_type = PartialToken::NUMBER_SCIENTIFIC_STUB_MINUS;
537 }
538 else
539 {
540 this->DiscontinueNumber();
541 }
542 tok.push_back(c);
543 line_pos++;
544 break;
545
546 case '0':
547 case '1':
548 case '2':
549 case '3':
550 case '4':
551 case '5':
552 case '6':
553 case '7':
554 case '8':
555 case '9':
556 if (partial_tok_type == PartialToken::NUMBER_SCIENTIFIC_STUB
557 || partial_tok_type == PartialToken::NUMBER_SCIENTIFIC_STUB_MINUS)
558 {
559 partial_tok_type = PartialToken::NUMBER_SCIENTIFIC;
560 }
561 else if (partial_tok_type == PartialToken::NUMBER_STUB_MINUS)
562 {
563 partial_tok_type = PartialToken::NUMBER_INTEGER;
564 }
565 tok.push_back(c);
566 line_pos++;
567 break;
568
569 default:
570 this->DiscontinueNumber();
571 tok.push_back(c);
572 line_pos++;
573 break;
574
575 }
576
577 if (partial_tok_type == PartialToken::GARBAGE)
578 {
579 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
580 fmt::format("{}, line {}, pos {}: stray character '{}' in number", datastream->getName(), line_num, line_pos, c));
581 }
582}
583
585{
586 this->DiscontinueBool();
587 switch (partial_tok_type)
588 {
589 case PartialToken::KEYWORD:
590 this->FlushStringishToken(TokenType::KEYWORD);
591 break;
592 case PartialToken::STRING_NAKED:
593 this->FlushStringishToken(TokenType::STRING);
594 break;
595 default:
596 // Discard token
597 tok.push_back('\0');
598 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
599 fmt::format("{}, line {}, pos {}: discarding incomplete boolean token '{}'", datastream->getName(), line_num, line_pos, tok.data()));
600 tok.clear();
601 partial_tok_type = PartialToken::NONE;
602 break;
603 }
604}
605
606void DocumentParser::UpdateBool(const char c)
607{
608 switch (c)
609 {
610 case '\r':
611 break;
612
613 case ' ':
614 case ',':
615 case '\t':
617 line_pos++;
618 break;
619
620 case '\n':
622 // Break line
623 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
624 line_num++;
625 line_pos = 0;
626 break;
627
628 case ':':
629 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_COLON)
630 {
632 }
633 else
634 {
635 this->DiscontinueBool();
636 tok.push_back(c);
637 }
638 line_pos++;
639 break;
640
641 case '=':
642 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_EQUALS)
643 {
645 }
646 else
647 {
648 this->DiscontinueBool();
649 tok.push_back(c);
650 }
651 line_pos++;
652 break;
653
654 case 'r':
655 if (partial_tok_type != PartialToken::BOOL_TRUE || tok.size() != 1)
656 {
657 this->DiscontinueBool();
658 }
659 tok.push_back(c);
660 line_pos++;
661 break;
662
663 case 'u':
664 if (partial_tok_type != PartialToken::BOOL_TRUE || tok.size() != 2)
665 {
666 this->DiscontinueBool();
667 }
668 tok.push_back(c);
669 line_pos++;
670 break;
671
672 case 'a':
673 if (partial_tok_type != PartialToken::BOOL_FALSE || tok.size() != 1)
674 {
675 this->DiscontinueBool();
676 }
677 tok.push_back(c);
678 line_pos++;
679 break;
680
681 case 'l':
682 if (partial_tok_type != PartialToken::BOOL_FALSE || tok.size() != 2)
683 {
684 this->DiscontinueBool();
685 }
686 tok.push_back(c);
687 line_pos++;
688 break;
689
690 case 's':
691 if (partial_tok_type != PartialToken::BOOL_FALSE || tok.size() != 3)
692 {
693 this->DiscontinueBool();
694 }
695 tok.push_back(c);
696 line_pos++;
697 break;
698
699 case 'e':
700 if (partial_tok_type == PartialToken::BOOL_TRUE && tok.size() == 3)
701 {
702 doc.tokens.push_back({ TokenType::BOOL, 1.f });
703 tok.clear();
704 partial_tok_type = PartialToken::NONE;
705 }
706 else if (partial_tok_type == PartialToken::BOOL_FALSE && tok.size() == 4)
707 {
708 doc.tokens.push_back({ TokenType::BOOL, 0.f });
709 tok.clear();
710 partial_tok_type = PartialToken::NONE;
711 }
712 else
713 {
714 this->DiscontinueBool();
715 tok.push_back(c);
716 }
717 line_pos++;
718 break;
719
720 default:
721 this->DiscontinueBool();
722 tok.push_back(c);
723 line_pos++;
724 break;
725 }
726
727 if (partial_tok_type == PartialToken::GARBAGE)
728 {
729 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
730 fmt::format("{}, line {}, pos {}: stray character '{}' in boolean", datastream->getName(), line_num, line_pos, c));
731 }
732}
733
734void DocumentParser::DiscontinueBool()
735{
736 if (doc.tokens.size() == 0 || doc.tokens.back().type == TokenType::LINEBREAK)
737 partial_tok_type = PartialToken::KEYWORD;
738 else if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
739 partial_tok_type = PartialToken::STRING_NAKED;
740 else
741 partial_tok_type = PartialToken::GARBAGE;
742}
743
744void DocumentParser::DiscontinueNumber()
745{
746 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
747 partial_tok_type = PartialToken::STRING_NAKED;
748 else
749 partial_tok_type = PartialToken::GARBAGE;
750}
751
752void DocumentParser::DiscontinueKeyword()
753{
754 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
755 partial_tok_type = PartialToken::STRING_NAKED;
756 else
757 partial_tok_type = PartialToken::GARBAGE;
758}
759
760void DocumentParser::UpdateKeyword(const char c)
761{
762 switch (c)
763 {
764 case '\r':
765 break;
766
767 case ' ':
768 case ',':
769 case '\t':
770 this->FlushStringishToken(TokenType::KEYWORD);
771 line_pos++;
772 break;
773
774 case '\n':
775 this->FlushStringishToken(TokenType::KEYWORD);
776 // Break line
777 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
778 line_num++;
779 line_pos = 0;
780 break;
781
782 case ':':
783 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_COLON)
784 {
785 this->FlushStringishToken(TokenType::KEYWORD);
786 }
787 else
788 {
789 this->DiscontinueKeyword();
790 tok.push_back(c);
791 }
792 line_pos++;
793 break;
794
795 case '=':
796 if (options & GenericDocument::OPTION_ALLOW_SEPARATOR_EQUALS)
797 {
798 this->FlushStringishToken(TokenType::KEYWORD);
799 }
800 else
801 {
802 this->DiscontinueKeyword();
803 tok.push_back(c);
804 }
805 line_pos++;
806 break;
807
808 case '_':
809 tok.push_back(c);
810 line_pos++;
811 break;
812
813 case '(':
814 if (options & GenericDocument::OPTION_ALLOW_NAKED_STRINGS)
815 {
816 if (options & GenericDocument::OPTION_PARENTHESES_CAPTURE_SPACES)
817 partial_tok_type = PartialToken::STRING_NAKED_CAPTURING_SPACES;
818 else
819 partial_tok_type = PartialToken::STRING_NAKED;
820 }
821 else
822 {
823 partial_tok_type = PartialToken::GARBAGE;
824 }
825 tok.push_back(c);
826 line_pos++;
827 break;
828
829 case ']':
830 if (partial_tok_type == PartialToken::KEYWORD_BRACED)
831 {
832 partial_tok_type = PartialToken::KEYWORD; // Do not allow any more ']'.
833 }
834 else
835 {
836 this->DiscontinueKeyword();
837 }
838 tok.push_back(c);
839 line_pos++;
840 break;
841
842 default:
843 if (!isalnum(c))
844 {
845 this->DiscontinueKeyword();
846 }
847 tok.push_back(c);
848 line_pos++;
849 break;
850 }
851
852 if (partial_tok_type == PartialToken::GARBAGE)
853 {
854 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
855 fmt::format("{}, line {}, pos {}: stray character '{}' in keyword", datastream->getName(), line_num, line_pos, c));
856 }
857}
858
859void DocumentParser::UpdateTitle(const char c)
860{
861 switch (c)
862 {
863 case '\r':
864 break;
865
866 case '\n':
867 this->FlushStringishToken(TokenType::STRING);
868 // Break line
869 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
870 line_num++;
871 line_pos = 0;
872 break;
873
874 default:
875 tok.push_back(c);
876 line_pos++;
877 break;
878 }
879}
880
881void DocumentParser::UpdateGarbage(const char c)
882{
883 switch (c)
884 {
885 case '\r':
886 break;
887
888 case ' ':
889 case ',':
890 case '\t':
891 case '\n':
892 tok.push_back('\0');
893 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_WARNING,
894 fmt::format("{}, line {}, pos {}: discarding garbage token '{}'", datastream->getName(), line_num, line_pos, tok.data()));
895 tok.clear();
896 partial_tok_type = PartialToken::NONE;
897 line_pos++;
898 break;
899
900 default:
901 tok.push_back(c);
902 line_pos++;
903 break;
904 }
905}
906
907void DocumentParser::FlushStringishToken(RoR::TokenType type)
908{
909 doc.tokens.push_back({ type, (float)doc.string_pool.size() });
910 tok.push_back('\0');
911 std::copy(tok.begin(), tok.end(), std::back_inserter(doc.string_pool));
912 tok.clear();
913 partial_tok_type = PartialToken::NONE;
914}
915
916void DocumentParser::FlushNumericToken()
917{
918 tok.push_back('\0');
919 if (partial_tok_type == PartialToken::NUMBER_INTEGER)
920 {
921 doc.tokens.push_back({ TokenType::INT, (float)Ogre::StringConverter::parseInt(tok.data()) });
922 }
923 else
924 {
925 doc.tokens.push_back({ TokenType::FLOAT, (float)Ogre::StringConverter::parseReal(tok.data()) });
926 }
927 tok.clear();
928 partial_tok_type = PartialToken::NONE;
929}
930
931void DocumentParser::ProcessChar(const char c)
932{
933 switch (partial_tok_type)
934 {
935 case PartialToken::NONE:
936 this->BeginToken(c);
937 break;
938
939 case PartialToken::COMMENT_SEMICOLON:
940 case PartialToken::COMMENT_SLASH:
941 case PartialToken::COMMENT_HASH:
942 this->UpdateComment(c);
943 break;
944
945 case PartialToken::STRING_QUOTED:
946 case PartialToken::STRING_NAKED:
947 case PartialToken::STRING_NAKED_CAPTURING_SPACES:
948 this->UpdateString(c);
949 break;
950
951 case PartialToken::NUMBER_STUB_MINUS:
952 case PartialToken::NUMBER_INTEGER:
953 case PartialToken::NUMBER_DECIMAL:
954 case PartialToken::NUMBER_SCIENTIFIC_STUB:
955 case PartialToken::NUMBER_SCIENTIFIC_STUB_MINUS:
956 case PartialToken::NUMBER_SCIENTIFIC:
957 this->UpdateNumber(c);
958 break;
959
960 case PartialToken::BOOL_TRUE:
961 case PartialToken::BOOL_FALSE:
962 this->UpdateBool(c);
963 break;
964
965 case PartialToken::KEYWORD:
966 case PartialToken::KEYWORD_BRACED:
967 this->UpdateKeyword(c);
968 break;
969
970 case PartialToken::TITLE_STRING:
971 this->UpdateTitle(c);
972 break;
973
974 case PartialToken::GARBAGE:
975 this->UpdateGarbage(c);
976 break;
977 }
978}
979
980void DocumentParser::ProcessEOF()
981{
982 // Flush any partial token
983 switch (partial_tok_type)
984 {
985 case PartialToken::STRING_QUOTED:
986 case PartialToken::STRING_NAKED_CAPTURING_SPACES:
987 case PartialToken::TITLE_STRING:
988 this->FlushStringishToken(TokenType::STRING);
989 break;
990
991 case PartialToken::KEYWORD_BRACED:
992 this->FlushStringishToken(TokenType::KEYWORD);
993 break;
994
995 default:
996 this->ProcessChar(' '); // Pretend processing a separator to flush any partial whitespace-incompatible token.
997 break;
998 }
999
1000 // Ensure newline at end of file
1001 if (doc.tokens.size() == 0 || doc.tokens.back().type != TokenType::LINEBREAK)
1002 {
1003 doc.tokens.push_back({ TokenType::LINEBREAK, 0.f });
1004 }
1005}
1006
1007void GenericDocument::loadFromDataStream(Ogre::DataStreamPtr datastream, const BitMask_t options)
1008{
1009 // Reset the document
1010 tokens.clear();
1011 string_pool.clear();
1012
1013 // Prepare context
1014 DocumentParser parser(*this, options, datastream);
1015 const size_t LINE_BUF_MAX = 10 * 1024; // 10Kb
1016 char buf[LINE_BUF_MAX];
1017
1018 // Parse the text
1019 while (!datastream->eof())
1020 {
1021 size_t buf_len = datastream->read(buf, LINE_BUF_MAX);
1022 for (size_t i = 0; i < buf_len; i++)
1023 {
1024 const char c = buf[i];
1025
1026 parser.ProcessChar(c);
1027 }
1028 }
1029 parser.ProcessEOF();
1030}
1031
1032#if OGRE_PLATFORM == OGRE_PLATFORM_WIN32
1033 const char* EOL_STR = "\r\n"; // CR+LF
1034#else
1035 const char* EOL_STR = "\n"; // "LF"
1036#endif
1037
1038void GenericDocument::saveToDataStream(Ogre::DataStreamPtr datastream)
1039{
1040 std::string separator;
1041 const char* pool_str = nullptr;
1042 const size_t BUF_MAX = 100;
1043 char buf[BUF_MAX];
1044
1045 for (Token& tok : tokens)
1046 {
1047 switch (tok.type)
1048 {
1049 case TokenType::LINEBREAK:
1050 datastream->write(EOL_STR, strlen(EOL_STR));
1051 separator = "";
1052 break;
1053
1054 case TokenType::COMMENT:
1055 datastream->write(";", 1);
1056 pool_str = string_pool.data() + (size_t)tok.data;
1057 datastream->write(pool_str, strlen(pool_str));
1058 break;
1059
1060 case TokenType::STRING:
1061 datastream->write(separator.data(), separator.size());
1062 pool_str = string_pool.data() + (size_t)tok.data;
1063 datastream->write(pool_str, strlen(pool_str));
1064 separator = ", ";
1065 break;
1066
1067 case TokenType::FLOAT:
1068 datastream->write(separator.data(), separator.size());
1069 snprintf(buf, BUF_MAX, "%g", tok.data);
1070 datastream->write(buf, strlen(buf));
1071 separator = ", ";
1072 break;
1073
1074 case TokenType::INT:
1075 datastream->write(separator.data(), separator.size());
1076 snprintf(buf, BUF_MAX, "%d", (int)tok.data);
1077 datastream->write(buf, strlen(buf));
1078 separator = ", ";
1079 break;
1080
1081 case TokenType::BOOL:
1082 datastream->write(separator.data(), separator.size());
1083 snprintf(buf, BUF_MAX, "%s", tok.data == 1.f ? "true" : "false");
1084 datastream->write(buf, strlen(buf));
1085 separator = ", ";
1086 break;
1087
1088 case TokenType::KEYWORD:
1089 pool_str = string_pool.data() + (size_t)tok.data;
1090 datastream->write(pool_str, strlen(pool_str));
1091 separator = " ";
1092 break;
1093 }
1094 }
1095}
1096
1097bool GenericDocument::loadFromResource(std::string resource_name, std::string resource_group_name, BitMask_t options/* = 0*/)
1098{
1099 try
1100 {
1101 Ogre::DataStreamPtr datastream = Ogre::ResourceGroupManager::getSingleton().openResource(resource_name, resource_group_name);
1102 this->loadFromDataStream(datastream, options);
1103 return true;
1104 }
1105 catch (Ogre::Exception& eeh)
1106 {
1107 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_ERROR,
1108 fmt::format("GenericDocument: could not load file '{}' from resource group '{}': {}", resource_name, resource_group_name, eeh.getDescription()));
1109 return false;
1110 }
1111}
1112
1113bool GenericDocument::saveToResource(std::string resource_name, std::string resource_group_name)
1114{
1115 try
1116 {
1117 Ogre::DataStreamPtr datastream = Ogre::ResourceGroupManager::getSingleton().createResource(resource_name, resource_group_name);
1118 this->saveToDataStream(datastream);
1119 return true;
1120 }
1121 catch (Ogre::Exception& eeh)
1122 {
1123 App::GetConsole()->putMessage(Console::CONSOLE_MSGTYPE_INFO, Console::CONSOLE_SYSTEM_ERROR,
1124 fmt::format("GenericDocument: could not write file '{}' to resource group '{}': {}", resource_name, resource_group_name, eeh.getDescription()));
1125 return false;
1126 }
1127}
1128
1129bool GenericDocContext::seekNextLine()
1130{
1131 // Skip current line
1132 while (!this->endOfFile() && this->tokenType() != TokenType::LINEBREAK)
1133 {
1134 this->moveNext();
1135 }
1136 this->moveNext();
1137
1138 // Skip comments and empty lines
1139 while (!this->endOfFile() && (this->isTokComment(0) || this->isTokLineBreak(0)))
1140 {
1141 this->moveNext();
1142 }
1143
1144 return this->endOfFile();
1145}
1146
1147int GenericDocContext::countLineArgs()
1148{
1149 int count = 0;
1150 while (!endOfFile(count) && this->tokenType(count) != TokenType::LINEBREAK)
1151 count++;
1152 return count;
1153}
1154
1155// -----------------
1156// Editing functions
1157
1158void GenericDocContext::appendTokens(int count)
1159{
1160 if (count <= 0)
1161 return;
1162
1163 token_pos = (int)doc->tokens.size();
1164 for (int i = 0; i < count; i++)
1165 {
1166 doc->tokens.push_back({ TokenType::NONE, 0.f });
1167 }
1168}
1169
1170bool GenericDocContext::insertToken(int offset)
1171{
1172 if (endOfFile(offset))
1173 return false;
1174
1175 doc->tokens.insert(doc->tokens.begin() + token_pos + offset, { TokenType::NONE, 0.f });
1176 return true;
1177}
1178
1179bool GenericDocContext::eraseToken(int offset)
1180{
1181 if (endOfFile(offset))
1182 return false;
1183
1184 // Just erase the token.
1185 // We don't care about garbage in `string_pool` - the strings are usually just 1-6 characters long anyway.
1186
1187 doc->tokens.erase(doc->tokens.begin() + token_pos + offset);
1188 return true;
1189}
1190
1191bool GenericDocContext::setStringData(int offset, TokenType type, const std::string& data)
1192{
1193 if (endOfFile(offset))
1194 return false;
1195
1196 // Insert the string at the end of the string_pool
1197 // We don't care about order - updating string offsets in tokens would be complicated and unlikely helpful.
1198
1199 doc->tokens[token_pos + offset] = { type, (float)doc->string_pool.size() };
1200 std::copy(data.begin(), data.end(), std::back_inserter(doc->string_pool));
1201 doc->string_pool.push_back('\0');
1202 return true;
1203}
1204
1205bool GenericDocContext::setFloatData(int offset, TokenType type, float data)
1206{
1207 if (endOfFile(offset))
1208 return false;
1209
1210 doc->tokens[token_pos + offset] = { type, data };
1211 return true;
1212}
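
What follows is not part of GenericFileFormat.cpp; it is a minimal usage sketch based only on the API visible in the listing above: loadFromResource() with a combination of the OPTION_* flags, and the tokens / string_pool vectors, where string-ish tokens store their value as a float offset into string_pool. It assumes those two vectors are accessible from calling code (as they are used directly above) and that a resource named "example.cfg" exists in a resource group "MyResourceGroup"; both names are placeholders.

#include "GenericFileFormat.h"
#include <cstdio>

using namespace RoR;

// Hypothetical helper: load a document and print every token to stdout.
static void DumpGenericDocument()
{
    GenericDocument doc;
    const BitMask_t options = GenericDocument::OPTION_ALLOW_NAKED_STRINGS |
                              GenericDocument::OPTION_ALLOW_SLASH_COMMENTS;
    if (!doc.loadFromResource("example.cfg", "MyResourceGroup", options)) // placeholder names
        return;

    for (Token& tok : doc.tokens)
    {
        switch (tok.type)
        {
        case TokenType::KEYWORD:
        case TokenType::STRING:
        case TokenType::COMMENT:
            // String-ish tokens: `data` is an offset into `string_pool`.
            std::printf("%s ", doc.string_pool.data() + (size_t)tok.data);
            break;
        case TokenType::INT:
            std::printf("%d ", (int)tok.data);
            break;
        case TokenType::FLOAT:
            std::printf("%g ", tok.data);
            break;
        case TokenType::BOOL:
            std::printf("%s ", tok.data == 1.f ? "true" : "false");
            break;
        case TokenType::LINEBREAK:
            std::printf("\n");
            break;
        default:
            break;
        }
    }
}

For in-place edits, the GenericDocContext editing functions defined above (appendTokens, insertToken, eraseToken, setStringData, setFloatData) operate on the same token/string-pool representation, and saveToDataStream() / saveToResource() write the modified token stream back out.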