Urban Planning Lecture Notes PDF May 2026
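The method listings below are excerpts from a larger analyzer class and do not show its imports or constructor. The following is a minimal setup sketch, assuming the class is named UrbanPlanningNotesAnalyzer and holds the attributes the methods reference; the exact original setup is not shown here.

# Assumed setup (not part of the original listing): the methods below rely on
# PyPDF2, NLTK tokenizers/stopwords, Counter, json, and typing.
import json
from collections import Counter
from typing import Dict, List

import PyPDF2
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize

# NLTK resources must be downloaded once before tokenization/stopwords work
nltk.download('punkt')
nltk.download('stopwords')


class UrbanPlanningNotesAnalyzer:
    # Assumed constructor: the excerpted methods reference self.pdf_path,
    # self.pages_text, self.full_text, self.sections, self.key_concepts,
    # and self.case_studies.
    def __init__(self, pdf_path: str):
        self.pdf_path = pdf_path
        self.pages_text: List[Dict] = []
        self.full_text = ""
        self.sections: List[Dict] = []
        self.key_concepts: List[Dict] = []
        self.case_studies: List[Dict] = []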
def extract_text_from_pdf(self) -> str:
    """Extract text from PDF file"""
    text = ""
    with open(self.pdf_path, 'rb') as file:
        pdf_reader = PyPDF2.PdfReader(file)
        for page_num, page in enumerate(pdf_reader.pages):
            page_text = page.extract_text()
            # Keep per-page text alongside the page number
            self.pages_text.append({
                'page_num': page_num + 1,
                'text': page_text
            })
            text += page_text + "\n"
    self.full_text = text
    return text
def _identify_focus_areas(self) -> List[str]:
    """Identify areas that need more attention based on complexity markers"""
    complexity_markers = [
        'important', 'crucial', 'essential', 'note that', 'remember',
        'key point', 'significant', 'critical', 'fundamental'
    ]
    focus_areas = []
    sentences = sent_tokenize(self.full_text)
    for sentence in sentences:
        for marker in complexity_markers:
            if marker in sentence.lower():
                focus_areas.append(sentence[:100])
                break
    return list(set(focus_areas))[:8]
def _show_questions(self):
    questions = self.analyzer.generate_study_questions()
    print("\n❓ STUDY QUESTIONS:")
    for i, q in enumerate(questions, 1):
        print(f"\n{i}. {q['question']}")
        print(f"   💡 Hint: {q['hint']}")
def extract_key_concepts(self) -> List[Dict]:
    """Extract and rank key urban planning concepts"""
    stop_words = set(stopwords.words('english'))

    # Urban planning specific terminology
    planning_terms = [
        'zoning', 'land use', 'transportation', 'infrastructure',
        'sustainability', 'urban design', 'smart growth', 'new urbanism',
        'gentrification', 'affordable housing', 'public space',
        'transit-oriented development', 'mixed-use', 'walkability',
        'green infrastructure', 'climate resilience', 'urban renewal',
        'community engagement', 'comprehensive plan', 'subdivision',
        'environmental impact', 'historic preservation', 'urban sprawl',
        'density', 'parking', 'complete streets', 'placemaking'
    ]

    # Tokenize and find frequencies
    words = word_tokenize(self.full_text.lower())
    words = [w for w in words if w.isalpha() and w not in stop_words]

    # Count frequencies of planning terms
    concept_counts = Counter()
    for term in planning_terms:
        count = self.full_text.lower().count(term)
        if count > 0:
            concept_counts[term] = count

    # Extract context for each concept
    concepts = []
    for concept, count in concept_counts.most_common(20):
        # Find sentences containing the concept
        sentences = sent_tokenize(self.full_text)
        context_sentences = [s for s in sentences if concept.lower() in s.lower()]
        context = context_sentences[:2] if context_sentences else []

        concepts.append({
            'term': concept,
            'frequency': count,
            'context': context
        })

    self.key_concepts = concepts
    return concepts
def export_to_json(self, output_path: str):
    """Export all analysis results to JSON file"""
    output = {
        'metadata': {
            'source_file': self.pdf_path,
            'total_pages': len(self.pages_text),
            'total_words': len(self.full_text.split())
        },
        'summary': self.create_summary(),
        'sections': self.sections,
        'key_concepts': self.key_concepts,
        'case_studies': self.case_studies,
        'study_questions': self.generate_study_questions(),
        'full_text_excerpt': self.full_text[:5000]  # First 5000 chars
    }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(output, f, indent=2, ensure_ascii=False)

    print(f"Analysis exported to {output_path}")


class UrbanPlanningStudyAssistant:
    def __init__(self, analyzer: UrbanPlanningNotesAnalyzer):
        self.analyzer = analyzer
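A hypothetical usage sketch follows, assuming the remaining methods referenced above (create_summary, generate_study_questions, and so on) are defined in the full listing; the file names are illustrative only.

# Illustrative usage; 'lecture_notes.pdf' and 'analysis.json' are placeholder names.
if __name__ == "__main__":
    analyzer = UrbanPlanningNotesAnalyzer("lecture_notes.pdf")
    analyzer.extract_text_from_pdf()
    analyzer.extract_key_concepts()
    analyzer.export_to_json("analysis.json")

    assistant = UrbanPlanningStudyAssistant(analyzer)
    assistant._show_questions()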