1353 // Initialize the space for the BufferBlob used to find and verify
1354 // instruction size in MachNode::emit_size()
1355 init_scratch_buffer_blob(const_req);
1356 }
1357
1358 CodeBuffer* PhaseOutput::init_buffer() {
1359 int stub_req = _buf_sizes._stub;
1360 int code_req = _buf_sizes._code;
1361 int const_req = _buf_sizes._const;
1362
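// pad_req reserves one native call's worth of bytes, plausibly so that a
// call at the very end of the method can still be padded without running
// off the buffer (a hedged reading; the source gives no rationale here).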
1363 int pad_req = NativeCall::byte_size();
1364
1365 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1366 stub_req += bs->estimate_stub_size();
1367
1368 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1369 // class HandlerImpl is platform-specific and defined in the *.ad files.
1370 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1371 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1372 stub_req += MAX_stubs_size; // ensure per-stub margin
1373 code_req += MAX_inst_size; // ensure per-instruction margin
1374
1375 if (StressCodeBuffers)
1376 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1377
1378 int total_req =
1379 const_req +
1380 code_req +
1381 pad_req +
1382 stub_req +
1383 exception_handler_req +
1384 deopt_handler_req; // deopt handler
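// Implied blob layout for the sum above: constants first, then instructions
// (with the call padding), then stubs including the handlers; this follows
// the usual CodeBuffer consts/insts/stubs split (an assumption, not stated here).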
1385
1386 if (C->has_method_handle_invokes())
1387 total_req += deopt_handler_req; // deopt MH handler
1388
1389 CodeBuffer* cb = code_buffer();
1390 cb->set_const_section_alignment(constant_table().alignment());
1391 cb->initialize(total_req, _buf_sizes._reloc);
1392
1393 // Have we run out of code space?
1550 int padding = mach->compute_padding(current_offset);
1551 // Make sure the safepoint node for polling is distinct from a call's
1552 // return address by adding a nop if needed.
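// One plausible reason: debug info and oop maps are keyed by PC offset, so a
// poll sharing an offset with a call's return point would be ambiguous
// (an inference; the source does not spell this out).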
1553 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1554 padding = nop_size;
1555 }
1556 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1557 current_offset == last_avoid_back_to_back_offset) {
1558 // Some instructions must not be issued back to back; separate them with a nop.
1559 padding = nop_size;
1560 }
1561
1562 if (padding > 0) {
1563 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1564 int nops_cnt = padding / nop_size;
1565 MachNode *nop = new MachNopNode(nops_cnt);
1566 block->insert_node(nop, j++);
1567 last_inst++;
1568 C->cfg()->map_node_to_block(nop, block);
1569 // Ensure enough space.
1570 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1571 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1572 C->record_failure("CodeCache is full");
1573 return;
1574 }
1575 nop->emit(masm, C->regalloc());
1576 masm->code()->flush_bundle(true);
1577 current_offset = masm->offset();
1578 }
1579
1580 bool observe_safepoint = is_sfn;
1581 // Remember the start of the last call in a basic block
1582 if (is_mcall) {
1583 MachCallNode *mcall = mach->as_MachCall();
1584
1585 // This destination address is NOT PC-relative
1586 mcall->method_set((intptr_t)mcall->entry_point());
1587
1588 // Save the return address
1589 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1590
1679 Label *blkLabel = &blk_labels[block_num];
1680 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1681 }
1682 }
1683 }
1684 } else if (!n->is_Proj()) {
1685 // Remember the beginning of the previous instruction, in case
1686 // it's followed by a flag-kill and a null-check. This happens
1687 // frequently on Intel with add-to-memory opcodes.
1688 previous_offset = current_offset;
1689 }
1690
1691 // Not an else-if!
1692 // If this is a trap-based cmp, add its offset to the list.
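// The recorded offsets are consumed later when the implicit exception table
// is filled in (based on the mainline output.cpp flow; an assumption here).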
1693 if (mach->is_TrapBasedCheckNode()) {
1694 inct_starts[inct_cnt++] = current_offset;
1695 }
1696 }
1697
1698 // Verify that there is sufficient space remaining
1699 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1700 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1701 C->record_failure("CodeCache is full");
1702 return;
1703 }
1704
1705 // Save the offset for the listing
1706 #if defined(SUPPORT_OPTO_ASSEMBLY)
1707 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1708 node_offsets[n->_idx] = masm->offset();
1709 }
1710 #endif
1711 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1712
1713 // "Normal" instruction case
1714 DEBUG_ONLY(uint instr_offset = masm->offset());
1715 n->emit(masm, C->regalloc());
1716 current_offset = masm->offset();
1717
1718 // Above we only verified that there is enough space in the instruction section.
1719 // However, the instruction may emit stubs that cause code buffer expansion.
3336
3337
3338 //-----------------------scratch_emit_size-------------------------------------
3339 // Helper function that computes size by emitting code
3340 uint PhaseOutput::scratch_emit_size(const Node* n) {
3341 // Start scratch_emit_size section.
3342 set_in_scratch_emit_size(true);
3343
3344 // Emit into a trash buffer and count bytes emitted.
3345 // This is a pretty expensive way to compute a size,
3346 // but it works well enough if seldom used.
3347 // All common fixed-size instructions are given a size
3348 // method by the AD file.
3349 // Note that the scratch buffer blob and locs memory are
3350 // allocated at the beginning of the compile task, and
3351 // may be shared by several calls to scratch_emit_size.
3352 // The allocation of the scratch buffer blob is particularly
3353 // expensive, since it has to grab the code cache lock.
3354 BufferBlob* blob = this->scratch_buffer_blob();
3355 assert(blob != nullptr, "Initialize BufferBlob at start");
3356 assert(blob->size() > MAX_inst_size, "sanity");
3357 relocInfo* locs_buf = scratch_locs_memory();
3358 address blob_begin = blob->content_begin();
3359 address blob_end = (address)locs_buf;
3360 assert(blob->contains(blob_end), "sanity");
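// Layout: the relocation scratch memory sits at the tail of the blob, and the
// CodeBuffer constructed below gets everything in front of it.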
3361 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3362 buf.initialize_consts_size(_scratch_const_size);
3363 buf.initialize_stubs_size(MAX_stubs_size);
3364 assert(locs_buf != nullptr, "sanity");
3365 int lsize = MAX_locs_size / 3;
3366 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3367 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3368 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3369 // Mark as scratch buffer.
3370 buf.consts()->set_scratch_emit();
3371 buf.insts()->set_scratch_emit();
3372 buf.stubs()->set_scratch_emit();
3373
3374 // Do the emission.
3375
3376 Label fakeL; // Fake label for branch instructions.
3381 masm.bind(fakeL);
3382 if (is_branch) {
3383 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3384 n->as_MachBranch()->label_set(&fakeL, 0);
3385 }
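// Binding fakeL above gives the branch a concrete (if bogus) target, so the
// emit() below can produce a correctly sized encoding.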
3386 n->emit(&masm, C->regalloc());
3387
3388 // Emitting into the scratch buffer should not fail
3389 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3390
3391 if (is_branch) // Restore label.
3392 n->as_MachBranch()->label_set(saveL, save_bnum);
3393
3394 // End scratch_emit_size section.
3395 set_in_scratch_emit_size(false);
3396
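// Only the instruction section's size is reported; constants and stubs
// emitted into the scratch buffer do not count toward the node's size.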
3397 return buf.insts_size();
3398 }
3399
3400 void PhaseOutput::install() {
3401 if (!C->should_install_code()) {
3402 return;
3403 } else if (C->stub_function() != nullptr) {
3404 install_stub(C->stub_name());
3405 } else {
3406 install_code(C->method(),
3407 C->entry_bci(),
3408 CompileBroker::compiler2(),
3409 C->has_unsafe_access(),
3410 SharedRuntime::is_wide_vector(C->max_vector_size()));
3411 }
3412 }
3413
3414 void PhaseOutput::install_code(ciMethod* target,
3415 int entry_bci,
3416 AbstractCompiler* compiler,
3417 bool has_unsafe_access,
3418 bool has_wide_vectors) {
3419 // Check if we want to skip execution of all compiled code.
3420 {
3421 #ifndef PRODUCT
3422 if (OptoNoExecute) {
3423 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3424 return;
3425 }
3426 #endif
3427 Compile::TracePhase tp(_t_registerMethod);
3428
3433 if (!target->is_static()) {
3434 // Padding at the UEP of an nmethod guarantees that the VEP is aligned. The padding
3435 // sits before the inline cache check, so dispatching through the UEP executes no
3436 // nop instructions while the VEP still ends up appropriately aligned.
3437 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3438 }
3439 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3440 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3441 }
3442
3443 C->env()->register_method(target,
3444 entry_bci,
3445 &_code_offsets,
3446 _orig_pc_slot_offset_in_bytes,
3447 code_buffer(),
3448 frame_size_in_words(),
3449 oop_map_set(),
3450 &_handler_table,
3451 inc_table(),
3452 compiler,
3453 has_unsafe_access,
3454 SharedRuntime::is_wide_vector(C->max_vector_size()),
3455 C->has_monitors(),
3456 C->has_scoped_access(),
3457 0);
3458
3459 if (C->log() != nullptr) { // Print code cache state into compiler log
3460 C->log()->code_cache_state();
3461 }
3462 }
3463 }
3464 void PhaseOutput::install_stub(const char* stub_name) {
3465 // Entry point will be accessed using stub_entry_point();
3466 if (code_buffer() == nullptr) {
3467 Matcher::soft_match_failure();
3468 } else {
3469 if (PrintAssembly && (WizardMode || Verbose))
3470 tty->print_cr("### Stub::%s", stub_name);
3471
3472 if (!C->failing()) {
3473 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3474
3475 // Make the NMethod
3476 // For now we mark the frame as never safe for profile stackwalking
3477 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3478 code_buffer(),
3479 CodeOffsets::frame_never_safe,
3480 // _code_offsets.value(CodeOffsets::Frame_Complete),
3481 frame_size_in_words(),
3646 // Dump the exception table as well
3647 if (n->is_Catch() && (Verbose || WizardMode)) {
3648 // Print the exception table for this offset
3649 _handler_table.print_subtable_for(pc);
3650 }
3651 st->bol(); // Make sure we start on a new line
3652 }
3653 st->cr(); // one empty line between blocks
3654 assert(cut_short || delay == nullptr, "no unconditional delay branch");
3655 } // End of per-block dump
3656
3657 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3658 }
3659 #endif
3660
3661 #ifndef PRODUCT
3662 void PhaseOutput::print_statistics() {
3663 Scheduling::print_statistics();
3664 }
3665 #endif
1353 // Initialize the space for the BufferBlob used to find and verify
1354 // instruction size in MachNode::emit_size()
1355 init_scratch_buffer_blob(const_req);
1356 }
1357
1358 CodeBuffer* PhaseOutput::init_buffer() {
1359 int stub_req = _buf_sizes._stub;
1360 int code_req = _buf_sizes._code;
1361 int const_req = _buf_sizes._const;
1362
1363 int pad_req = NativeCall::byte_size();
1364
1365 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1366 stub_req += bs->estimate_stub_size();
1367
1368 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1369 // class HandlerImpl is platform-specific and defined in the *.ad files.
1370 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1371 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1372 stub_req += MAX_stubs_size; // ensure per-stub margin
1373 code_req += max_inst_size(); // ensure per-instruction margin
1374
1375 if (StressCodeBuffers)
1376 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1377
1378 int total_req =
1379 const_req +
1380 code_req +
1381 pad_req +
1382 stub_req +
1383 exception_handler_req +
1384 deopt_handler_req; // deopt handler
1385
1386 if (C->has_method_handle_invokes())
1387 total_req += deopt_handler_req; // deopt MH handler
1388
1389 CodeBuffer* cb = code_buffer();
1390 cb->set_const_section_alignment(constant_table().alignment());
1391 cb->initialize(total_req, _buf_sizes._reloc);
1392
1393 // Have we run out of code space?
1550 int padding = mach->compute_padding(current_offset);
1551 // Make sure the safepoint node for polling is distinct from a call's
1552 // return address by adding a nop if needed.
1553 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1554 padding = nop_size;
1555 }
1556 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1557 current_offset == last_avoid_back_to_back_offset) {
1558 // Some instructions must not be issued back to back; separate them with a nop.
1559 padding = nop_size;
1560 }
1561
1562 if (padding > 0) {
1563 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1564 int nops_cnt = padding / nop_size;
1565 MachNode *nop = new MachNopNode(nops_cnt);
1566 block->insert_node(nop, j++);
1567 last_inst++;
1568 C->cfg()->map_node_to_block(nop, block);
1569 // Ensure enough space.
1570 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1571 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1572 C->record_failure("CodeCache is full");
1573 return;
1574 }
1575 nop->emit(masm, C->regalloc());
1576 masm->code()->flush_bundle(true);
1577 current_offset = masm->offset();
1578 }
1579
1580 bool observe_safepoint = is_sfn;
1581 // Remember the start of the last call in a basic block
1582 if (is_mcall) {
1583 MachCallNode *mcall = mach->as_MachCall();
1584
1585 // This destination address is NOT PC-relative
1586 mcall->method_set((intptr_t)mcall->entry_point());
1587
1588 // Save the return address
1589 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1590
1679 Label *blkLabel = &blk_labels[block_num];
1680 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1681 }
1682 }
1683 }
1684 } else if (!n->is_Proj()) {
1685 // Remember the beginning of the previous instruction, in case
1686 // it's followed by a flag-kill and a null-check. This happens
1687 // frequently on Intel with add-to-memory opcodes.
1688 previous_offset = current_offset;
1689 }
1690
1691 // Not an else-if!
1692 // If this is a trap-based cmp, add its offset to the list.
1693 if (mach->is_TrapBasedCheckNode()) {
1694 inct_starts[inct_cnt++] = current_offset;
1695 }
1696 }
1697
1698 // Verify that there is sufficient space remaining
1699 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1700 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1701 C->record_failure("CodeCache is full");
1702 return;
1703 }
1704
1705 // Save the offset for the listing
1706 #if defined(SUPPORT_OPTO_ASSEMBLY)
1707 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1708 node_offsets[n->_idx] = masm->offset();
1709 }
1710 #endif
1711 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1712
1713 // "Normal" instruction case
1714 DEBUG_ONLY(uint instr_offset = masm->offset());
1715 n->emit(masm, C->regalloc());
1716 current_offset = masm->offset();
1717
1718 // Above we only verified that there is enough space in the instruction section.
1719 // However, the instruction may emit stubs that cause code buffer expansion.
3336
3337
3338 //-----------------------scratch_emit_size-------------------------------------
3339 // Helper function that computes size by emitting code
3340 uint PhaseOutput::scratch_emit_size(const Node* n) {
3341 // Start scratch_emit_size section.
3342 set_in_scratch_emit_size(true);
3343
3344 // Emit into a trash buffer and count bytes emitted.
3345 // This is a pretty expensive way to compute a size,
3346 // but it works well enough if seldom used.
3347 // All common fixed-size instructions are given a size
3348 // method by the AD file.
3349 // Note that the scratch buffer blob and locs memory are
3350 // allocated at the beginning of the compile task, and
3351 // may be shared by several calls to scratch_emit_size.
3352 // The allocation of the scratch buffer blob is particularly
3353 // expensive, since it has to grab the code cache lock.
3354 BufferBlob* blob = this->scratch_buffer_blob();
3355 assert(blob != nullptr, "Initialize BufferBlob at start");
3356 assert(blob->size() > max_inst_size(), "sanity");
3357 relocInfo* locs_buf = scratch_locs_memory();
3358 address blob_begin = blob->content_begin();
3359 address blob_end = (address)locs_buf;
3360 assert(blob->contains(blob_end), "sanity");
3361 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3362 buf.initialize_consts_size(_scratch_const_size);
3363 buf.initialize_stubs_size(MAX_stubs_size);
3364 assert(locs_buf != nullptr, "sanity");
3365 int lsize = MAX_locs_size / 3;
3366 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3367 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3368 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3369 // Mark as scratch buffer.
3370 buf.consts()->set_scratch_emit();
3371 buf.insts()->set_scratch_emit();
3372 buf.stubs()->set_scratch_emit();
3373
3374 // Do the emission.
3375
3376 Label fakeL; // Fake label for branch instructions.
3381 masm.bind(fakeL);
3382 if (is_branch) {
3383 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3384 n->as_MachBranch()->label_set(&fakeL, 0);
3385 }
3386 n->emit(&masm, C->regalloc());
3387
3388 // Emitting into the scratch buffer should not fail
3389 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3390
3391 if (is_branch) // Restore label.
3392 n->as_MachBranch()->label_set(saveL, save_bnum);
3393
3394 // End scratch_emit_size section.
3395 set_in_scratch_emit_size(false);
3396
3397 return buf.insts_size();
3398 }
3399
3400 void PhaseOutput::install() {
3401 if (C->should_install_code() && C->stub_function() != nullptr) {
3402 install_stub(C->stub_name());
3403 } else {
3404 install_code(C->method(),
3405 C->entry_bci(),
3406 CompilerThread::current()->compiler(),
3407 C->has_unsafe_access(),
3408 SharedRuntime::is_wide_vector(C->max_vector_size()));
3409 }
3410 }
3411
3412 void PhaseOutput::install_code(ciMethod* target,
3413 int entry_bci,
3414 AbstractCompiler* compiler,
3415 bool has_unsafe_access,
3416 bool has_wide_vectors) {
3417 // Check if we want to skip execution of all compiled code.
3418 {
3419 #ifndef PRODUCT
3420 if (OptoNoExecute) {
3421 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3422 return;
3423 }
3424 #endif
3425 Compile::TracePhase tp(_t_registerMethod);
3426
3431 if (!target->is_static()) {
3432 // Padding at the UEP of an nmethod guarantees that the VEP is aligned. The padding
3433 // sits before the inline cache check, so dispatching through the UEP executes no
3434 // nop instructions while the VEP still ends up appropriately aligned.
3435 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3436 }
3437 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
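// Net effect for non-static methods: the unverified entry (UEP) sits
// MacroAssembler::ic_check_size() bytes before the verified entry (VEP).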
3438 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3439 }
3440
3441 C->env()->register_method(target,
3442 entry_bci,
3443 &_code_offsets,
3444 _orig_pc_slot_offset_in_bytes,
3445 code_buffer(),
3446 frame_size_in_words(),
3447 oop_map_set(),
3448 &_handler_table,
3449 inc_table(),
3450 compiler,
3451 C->has_clinit_barriers(),
3452 C->for_preload(),
3453 has_unsafe_access,
3454 SharedRuntime::is_wide_vector(C->max_vector_size()),
3455 C->has_monitors(),
3456 C->has_scoped_access(),
3457 0,
3458 C->should_install_code());
3459
3460 if (C->log() != nullptr) { // Print code cache state into compiler log
3461 C->log()->code_cache_state();
3462 }
3463 if (C->has_clinit_barriers()) {
3464 assert(C->for_preload(), "sanity");
3465 // Build second version of code without class initialization barriers
3466 if (C->env()->task()->compile_reason() == CompileTask::Reason_PrecompileForPreload) {
3467 // Don't automatically precompile a barrier-free version unless explicitly asked.
3468 } else {
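// Recording this failure is what triggers a retry compilation without the
// clinit barriers (assuming the usual retry-on-failure-reason path).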
3469 C->record_failure(C2Compiler::retry_no_clinit_barriers());
3470 }
3471 }
3472 }
3473 }
3474 void PhaseOutput::install_stub(const char* stub_name) {
3475 // Entry point will be accessed using stub_entry_point();
3476 if (code_buffer() == nullptr) {
3477 Matcher::soft_match_failure();
3478 } else {
3479 if (PrintAssembly && (WizardMode || Verbose))
3480 tty->print_cr("### Stub::%s", stub_name);
3481
3482 if (!C->failing()) {
3483 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3484
3485 // Make the NMethod
3486 // For now we mark the frame as never safe for profile stackwalking
3487 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3488 code_buffer(),
3489 CodeOffsets::frame_never_safe,
3490 // _code_offsets.value(CodeOffsets::Frame_Complete),
3491 frame_size_in_words(),
3656 // Dump the exception table as well
3657 if (n->is_Catch() && (Verbose || WizardMode)) {
3658 // Print the exception table for this offset
3659 _handler_table.print_subtable_for(pc);
3660 }
3661 st->bol(); // Make sure we start on a new line
3662 }
3663 st->cr(); // one empty line between blocks
3664 assert(cut_short || delay == nullptr, "no unconditional delay branch");
3665 } // End of per-block dump
3666
3667 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3668 }
3669 #endif
3670
3671 #ifndef PRODUCT
3672 void PhaseOutput::print_statistics() {
3673 Scheduling::print_statistics();
3674 }
3675 #endif
3676
3677 int PhaseOutput::max_inst_size() {
3678 if (AOTCodeCache::is_on_for_dump()) {
3679 // See the comment in output.hpp.
3680 return 16384;
3681 } else {
3682 return mainline_MAX_inst_size;
3683 }
3684 }
3685
3686 int PhaseOutput::max_inst_gcstub_size() {
3687 assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
3688 return mainline_MAX_inst_size;
3689 }