// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe

#ifdef ASSERT
  static int       _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  bool _defer_initial_card_mark;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  uint _n_par_threads;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new tlab. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);

  // Allocate an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);

  // Like allocate_init, but the block returned by a successful allocation
  // is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_noinit
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr,
                                                             size_t size);
  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);
  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may not be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."
  //
  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

  // XXX is_permanent() and is_in_permanent() should be better named
  // to distinguish one from the other.

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is in reserved space
  // for the space not that it is actually allocated (i.e. in committed
  // space). If you need the more conservative answer use is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_permanent_or_null(const void *p) const {
    return p == NULL || is_permanent(p);
  }

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Number of threads currently working on GC tasks.
  uint n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(uint t) { _n_par_threads = t; };

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // Allocate and initialize instances of Class
  static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely. These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does).  This allows for a delay in the installation of the klass
  // pointer that is needed during the create of klassKlass's.  The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                        oop obj,
                                                        int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported. mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier. Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections;}

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each. This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory. If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // object contained in permanent memory. If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE!  There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
  void pre_full_gc_dump();
  void post_full_gc_dump();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behaviour is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() { print_gc_threads_on(tty); }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // If PrintHeapAtGC is set call the appropriate routines
  void print_heap_before_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_before_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_before();
    }
  }
  void print_heap_after_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_after_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_after();
    }
  }

  // Allocate GCHeapLog during VM startup
  static void initialize_heap_log();

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // This is a convenience method that is used in cases where
  // the actual number of GC worker threads is not pertinent but
  // only whether there are more than 0.  Use of this method helps
  // reduce the occurrence of ParallelGCThreads to uses where the
  // actual number may be germane.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }

  /////////////// Unit tests ///////////////

  NOT_PRODUCT(static void test_is_in();)
};
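The central idea in the allocation interface above is the split between the per-thread TLAB fast path (allocate_from_tlab) and the shared slow path (mem_allocate), tied together by common_mem_allocate_init(). The following is a minimal, standalone C++ sketch of that fast-path/slow-path structure only; SimpleTlab, SimpleHeap and the byte-sized HeapWord typedef are stand-ins invented for this illustration, not the HotSpot types, and the real code additionally retires TLABs, installs the klass pointer, and may trigger a GC before failing.

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <iostream>

typedef char* HeapWord;   // stand-in: one "word" == one byte, for simplicity

struct SimpleTlab {
  HeapWord top;
  HeapWord end;

  // Fast path: bump-pointer allocation inside the thread-local buffer.
  HeapWord allocate(size_t size) {
    if (top != nullptr && size_t(end - top) >= size) {
      HeapWord obj = top;
      top += size;
      return obj;
    }
    return nullptr;   // fall through to the shared slow path
  }
};

struct SimpleHeap {
  // Slow path: a shared allocation that a real heap would guard with a lock
  // or CAS, and that may trigger a collection when it fails.
  HeapWord mem_allocate(size_t size) {
    return static_cast<HeapWord>(std::malloc(size));
  }

  // Analogue of common_mem_allocate_init(): try the TLAB, fall back to the
  // shared path, then zero the block so the caller sees initialized memory.
  HeapWord common_mem_allocate_init(SimpleTlab& tlab, size_t size) {
    HeapWord obj = tlab.allocate(size);
    if (obj == nullptr) {
      obj = mem_allocate(size);
    }
    if (obj != nullptr) {
      std::memset(obj, 0, size);
    }
    return obj;
  }
};

int main() {
  SimpleHeap heap;
  SimpleTlab tlab{nullptr, nullptr};   // empty TLAB: forces the slow path
  HeapWord obj = heap.common_mem_allocate_init(tlab, 64);
  std::cout << (obj != nullptr ? "allocated 64 bytes" : "allocation failed") << "\n";
  std::free(obj);
  return 0;
}

The same split also explains the filler utilities: when a TLAB is retired (see ensure_parsability), its unused tail is turned into a dummy object with fill_with_object() so that heap walkers can still step block by block through the region.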