diff --git a/1_lab1.ipynb b/1_lab1.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1b5dc540e872d9354bbe405b2d540d8353ac6243 --- /dev/null +++ b/1_lab1.ipynb @@ -0,0 +1,311 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Welcome to the start of your adventure in Agentic AI" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Are you ready for action??

\n", + " Have you completed all the setup steps in the setup folder?
\n", + " Have you checked out the guides in the guides folder?
\n", + " Well in that case, you're ready!!\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Treat these labs as a resource

\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### And please do remember to contact me if I can help\n", + "\n", + "And I love to connect: https://www.linkedin.com/in/eddonner/\n", + "\n", + "\n", + "### New to Notebooks like this one? Head over to the guides folder!\n", + "\n", + "Otherwise:\n", + "1. Click on the top right to Select a Kernel\n", + "2. Click in each \"cell\" below and press Shift+Enter to run\n", + "3. Enjoy!" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# First let's do an import\n", + "from dotenv import load_dotenv\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Next it's time to load the API keys into environment variables\n", + "\n", + "load_dotenv(override=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check the keys\n", + "\n", + "import os\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set - please head to the troubleshooting guide in the guides folder\")\n", + " \n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# And now - the all important import statement\n", + "# If you get an import error - head over to troubleshooting guide\n", + "\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# And now we'll create an instance of the OpenAI class\n", + "# If you're not sure what it means to create an instance of a class - head over to the guides folder!\n", + "# If you get a NameError - head over to the guides folder to learn about NameErrors\n", + "\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a list of messages in the familiar OpenAI format\n", + "\n", + "messages = [{\"role\": \"user\", \"content\": \"What is 2+2?\"}]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# And now call it! Any problems, head to the troubleshooting guide\n", + "\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages\n", + ")\n", + "\n", + "print(response.choices[0].message.content)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# And now - let's ask for a question:\n", + "\n", + "question = \"Please propose a hard, challenging question to assess someone's IQ. 
Respond only with the question.\"\n", + "messages = [{\"role\": \"user\", \"content\": question}]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ask it\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages\n", + ")\n", + "\n", + "question = response.choices[0].message.content\n", + "\n", + "print(question)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "# form a new messages list\n", + "messages = [{\"role\": \"user\", \"content\": question}]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Ask it again\n", + "\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages\n", + ")\n", + "\n", + "answer = response.choices[0].message.content\n", + "print(answer)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.display import Markdown, display\n", + "\n", + "display(Markdown(answer))\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Congratulations!\n", + "\n", + "That was a small, simple step in the direction of Agentic AI, with your new environment!\n", + "\n", + "Next time things get more interesting..." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Exercise

\n", + " Now try this commercial application:
\n", + " First ask the LLM to pick a business area that might be worth exploring for an Agentic AI opportunity.
\n", + " Then ask the LLM to present a pain-point in that industry - something challenging that might be ripe for an Agentic solution.
\n", + " Finally have 3 third LLM call propose the Agentic AI solution.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First create the messages:\n", + "\n", + "messages = [{\"role\": \"user\", \"content\": \"Something here\"}]\n", + "\n", + "# Then make the first call:\n", + "\n", + "response =\n", + "\n", + "# Then read the business idea:\n", + "\n", + "business_idea = response.\n", + "\n", + "# And repeat!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/2_lab2.ipynb b/2_lab2.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c379c3767dfe682d90b38e70acacdbf4000e9704 --- /dev/null +++ b/2_lab2.ipynb @@ -0,0 +1,1448 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Welcome to the Second Lab - Week 1, Day 3\n", + "\n", + "Today we will work with lots of models! This is a way to get comfortable with APIs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Important point - please read

\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations.

If you have time, I'd love it if you submit a PR for changes in the community_contributions folder - instructions in the resources. Also, if you have a GitHub account, use it to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + "        
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Start with imports - ask ChatGPT to explain any package that you don't know\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from anthropic import Anthropic\n", + "from IPython.display import Markdown, display" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Always remember to do this!\n", + "load_dotenv(override=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AI\n", + "DeepSeek API Key exists and begins sk-\n", + "Groq API Key exists and begins gsk_\n" + ] + } + ], + "source": [ + "# Print the key prefixes to help with any debugging\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n", + "groq_api_key = os.getenv('GROQ_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set (and this is optional)\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:2]}\")\n", + "else:\n", + " print(\"Google API Key not set (and this is optional)\")\n", + "\n", + "if deepseek_api_key:\n", + " print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n", + "else:\n", + " print(\"DeepSeek API Key not set (and this is optional)\")\n", + "\n", + "if groq_api_key:\n", + " print(f\"Groq API Key exists and begins {groq_api_key[:4]}\")\n", + "else:\n", + " print(\"Groq API Key not set (and this is optional)\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "request = \"Please come up with a challenging, nuanced question that I can ask a number of LLMs to evaluate their intelligence. \"\n", + "request += \"Answer only with the question, no explanation.\"\n", + "messages = [{\"role\": \"user\", \"content\": request}]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'user',\n", + " 'content': 'Please come up with a challenging, nuanced question that I can ask a number of LLMs to evaluate their intelligence. 
Answer only with the question, no explanation.'}]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "messages" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "How would you analyze the ethical implications of using artificial intelligence in predictive policing, considering factors such as bias, accountability, and societal impact?\n" + ] + } + ], + "source": [ + "openai = OpenAI()\n", + "response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=messages,\n", + ")\n", + "question = response.choices[0].message.content\n", + "print(question)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "competitors = []\n", + "answers = []\n", + "messages = [{\"role\": \"user\", \"content\": question}]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves a multidimensional approach, considering several key factors such as bias, accountability, and societal impact. Here is a structured framework for this analysis:\n", + "\n", + "### 1. **Bias**\n", + "- **Data Bias**: AI systems rely heavily on historical data, which can perpetuate existing biases present in that data. If the data reflects racial or socioeconomic disparities in policing (e.g., over-policing in certain communities), the AI can reinforce these biases by predicting higher crime rates in these areas, leading to a self-fulfilling prophecy.\n", + "- **Algorithmic Bias**: The design of algorithms may also introduce biases if the developers unconsciously embed their own biases into the model. It's crucial to examine who created the algorithms and what assumptions were made during their development.\n", + "- **Mitigation Strategies**: Implementing techniques like algorithmic fairness audits and diverse training datasets can help reduce bias. Regularly assessing and adjusting algorithms as new data comes in can also be important.\n", + "\n", + "### 2. **Accountability**\n", + "- **Responsibility for Decisions**: If an AI system makes a predictive error that leads to wrongful arrest or civil liberties violations, determining accountability becomes complex. Clear lines of responsibility must be established—who is to blame: the algorithm developers, the law enforcement agency, or the policymakers who implemented the AI?\n", + "- **Transparency**: The opacity of many AI systems can hinder accountability. Stakeholders should demand transparency regarding how algorithms work and what data they use. This includes clear reporting on algorithmic decisions and their outcomes.\n", + "- **Human Oversight**: AI in predictive policing should not replace human judgment. Maintaining a system of human oversight can ensure that critical decisions, especially those impacting civil rights, are vetted through human interpretation and ethical considerations.\n", + "\n", + "### 3. **Societal Impact**\n", + "- **Civil Liberties**: The use of predictive policing can infringe on individuals' rights if it leads to excessive surveillance or profiling based on prediction rather than behavior. 
There's a fine line between preventive measures and civil rights violations.\n", + "- **Community Trust**: The deployment of AI in policing can affect community relationships with law enforcement. If communities perceive predictive policing as biased or unfair, it may lead to mistrust and decreased cooperation with law enforcement efforts.\n", + "- **Resource Allocation**: AI may skew resource allocation towards heavily policed communities, potentially exacerbating existing inequalities and diverting resources away from crime prevention and community investment in areas that may genuinely need them.\n", + "\n", + "### 4. **Ethical Frameworks**\n", + "- **Utilitarian Perspectives**: While predictive policing can potentially reduce crime detection and prevention, ethical evaluation must consider broader societal impacts, including the potential harm to affected communities.\n", + "- **Deontological Perspectives**: From a rights-based view, predictive policing must respect individual rights and freedoms, ensuring that law enforcement practices do not compromise the dignity and autonomy of individuals.\n", + "\n", + "### 5. **Public Engagement and Policy**\n", + "- **Community Consultation**: Engaging the community in discussions about the use of AI in policing can help bridge gaps and increase transparency. Public forums can provide a platform for feedback and concerns regarding predictive technologies.\n", + "- **Legislative Oversight**: Policymakers need to establish robust regulatory frameworks governing the use of predictive policing tools to safeguard civil liberties and ensure accountability and transparency throughout the process.\n", + "\n", + "### Conclusion\n", + "The ethical implications of using AI in predictive policing are complex and multifaceted. A careful, considerate approach that addresses bias, ensures accountability, and considers the broader societal impacts is critical for navigating the challenges posed by these technologies. Implementing safeguards and engaging with communities can help harness the benefits of AI while minimizing its harms." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# The API we know well\n", + "\n", + "model_name = \"gpt-4o-mini\"\n", + "\n", + "response = openai.chat.completions.create(model=model_name, messages=messages)\n", + "answer = response.choices[0].message.content\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# Ethics of AI in Predictive Policing\n", + "\n", + "Predictive policing using AI presents several significant ethical challenges:\n", + "\n", + "## Bias Concerns\n", + "- Historical police data often contains embedded biases against marginalized communities\n", + "- Algorithms may perpetuate or amplify these biases, creating harmful feedback loops\n", + "- Risk of technological laundering where human bias is hidden behind a veneer of algorithmic objectivity\n", + "\n", + "## Accountability Issues\n", + "- \"Black box\" algorithms create difficulties in understanding how predictions are generated\n", + "- Unclear responsibility chains between developers, police departments, and officers\n", + "- Questions about legal recourse for citizens wrongfully targeted\n", + "\n", + "## Societal Impact\n", + "- Potential erosion of presumption of innocence by targeting individuals based on statistical likelihood\n", + "- Risk of creating over-policed communities, reinforcing existing social inequalities\n", + "- Privacy implications of mass data collection and algorithmic surveillance\n", + "\n", + "Ethical implementation would require transparent algorithms, diverse training data, human oversight, regular auditing for bias, and community involvement in deployment decisions. The core question remains whether we can balance potential public safety benefits against risks to civil liberties and equal protection." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Anthropic has a slightly different API, and Max Tokens is required\n", + "\n", + "model_name = \"claude-3-7-sonnet-latest\"\n", + "\n", + "claude = Anthropic()\n", + "response = claude.messages.create(model=model_name, messages=messages, max_tokens=1000)\n", + "answer = response.content[0].text\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires a multi-faceted approach, considering not only the technology itself but also the complex social context in which it is deployed. Here's a breakdown of the key factors:\n", + "\n", + "**1. Bias:**\n", + "\n", + "* **Source Data Bias:** AI models are trained on historical data, which often reflects existing biases within the criminal justice system. If past policing practices disproportionately targeted certain communities (e.g., due to racial profiling, socioeconomic disparities), the AI will learn and perpetuate these biases. 
This can lead to:\n", + " * **Reinforcement of Existing Inequalities:** Predictive policing may reinforce discriminatory practices by disproportionately focusing resources on marginalized communities, leading to more arrests, and further skewing the data the AI is trained on, creating a self-fulfilling prophecy.\n", + " * **Bias Amplification:** AI algorithms can amplify subtle biases in the data that might be difficult for humans to detect, leading to even more discriminatory outcomes.\n", + " * **Example:** If a system is trained on arrest data showing higher drug crime rates in a specific neighborhood, it may predict higher crime rates in that neighborhood even if the underlying reason is simply increased police presence and enforcement in that area.\n", + "\n", + "* **Algorithmic Bias:** Even with seemingly unbiased data, bias can creep into the algorithm itself during the design and development phase. This can be due to:\n", + " * **Feature Selection:** Choosing specific variables to predict crime may inadvertently correlate with protected characteristics (e.g., race, ethnicity, socioeconomic status).\n", + " * **Model Design:** Certain algorithms might inherently be more prone to bias than others.\n", + " * **Thresholds and Cutoffs:** Setting thresholds for risk scores or predicted crime rates can have disproportionate impacts on different groups.\n", + "\n", + "* **Mitigation Strategies:**\n", + " * **Data Auditing and Cleaning:** Thoroughly examine and address biases in the training data. Consider oversampling underrepresented groups or using techniques to de-bias the data.\n", + " * **Algorithmic Auditing:** Regularly audit the algorithm's performance to identify and correct for bias. Use metrics beyond overall accuracy, focusing on fairness metrics (e.g., equal opportunity, predictive parity, calibration).\n", + " * **Transparency and Explainability:** Ensure that the AI's decision-making process is transparent and explainable to stakeholders, including law enforcement, policymakers, and the public. This allows for scrutiny and identification of potential biases.\n", + "\n", + "**2. Accountability:**\n", + "\n", + "* **Who is responsible when the AI makes a mistake?** Determining accountability is crucial. 
If an AI predicts a crime and leads to a wrongful arrest, who is held responsible: the software developer, the police department, the officer who acted on the prediction, or the AI itself (which is not a legal entity)?\n", + "* **Lack of Transparency:** \"Black box\" algorithms can make it difficult to understand how a prediction was made, making it challenging to hold anyone accountable for errors or biased outcomes.\n", + "* **Due Process Concerns:** Relying heavily on AI predictions can potentially undermine due process rights, as individuals may be targeted based on statistical probabilities rather than individual suspicion.\n", + "* **Mitigation Strategies:**\n", + " * **Clear Lines of Responsibility:** Establish clear lines of responsibility for the development, deployment, and use of predictive policing AI.\n", + " * **Human Oversight:** Implement robust human oversight mechanisms to ensure that AI predictions are not blindly followed but are carefully reviewed and validated by human officers.\n", + " * **Explainable AI (XAI):** Develop and deploy AI systems that provide explanations for their predictions, allowing for human review and scrutiny.\n", + " * **Independent Audits:** Conduct regular independent audits of predictive policing systems to assess their accuracy, fairness, and adherence to ethical guidelines.\n", + "\n", + "**3. Societal Impact:**\n", + "\n", + "* **Erosion of Trust:** If predictive policing systems are perceived as unfair or discriminatory, they can erode trust between law enforcement and the communities they serve.\n", + "* **Privacy Concerns:** Predictive policing systems often rely on the collection and analysis of large amounts of personal data, raising significant privacy concerns. The data used could include arrest records, social media activity, location data, and other sensitive information.\n", + "* **Chilling Effect on Civil Liberties:** The widespread use of predictive policing could have a chilling effect on civil liberties, as individuals may be less likely to engage in lawful activities if they fear being targeted by law enforcement based on AI predictions.\n", + "* **Displacement of Crime:** Predictive policing might simply displace crime to other areas, rather than addressing the root causes of crime.\n", + "* **Mitigation Strategies:**\n", + " * **Community Engagement:** Involve community members in the design, implementation, and oversight of predictive policing systems.\n", + " * **Data Minimization and Privacy Protection:** Collect and use only the data that is strictly necessary for predictive policing purposes and implement strong data security and privacy protections.\n", + " * **Transparency and Public Education:** Be transparent about how predictive policing systems work and how they are being used. Educate the public about the risks and benefits of these systems.\n", + " * **Focus on Root Causes of Crime:** Invest in programs that address the root causes of crime, such as poverty, inequality, and lack of opportunity. Predictive policing should not be seen as a substitute for addressing these underlying issues.\n", + "\n", + "**4. 
Alternatives and Trade-offs:**\n", + "\n", + "* **Consider non-AI solutions:** Before implementing AI-based predictive policing, explore alternative strategies, such as community policing, problem-oriented policing, and focused deterrence, which may be more effective and less ethically problematic.\n", + "* **Weigh the potential benefits against the risks:** Carefully weigh the potential benefits of predictive policing (e.g., crime reduction, improved resource allocation) against the risks of bias, accountability issues, and societal harm. Ensure that the benefits outweigh the risks.\n", + "\n", + "**Framework for Ethical Assessment:**\n", + "\n", + "A robust ethical assessment should involve the following steps:\n", + "\n", + "1. **Identify Stakeholders:** Determine who will be affected by the use of predictive policing (e.g., law enforcement, communities, individuals).\n", + "2. **Map Potential Harms and Benefits:** Identify the potential harms and benefits of the system for each stakeholder group.\n", + "3. **Evaluate Fairness and Equity:** Assess whether the system is fair and equitable to all stakeholders.\n", + "4. **Consider Privacy and Data Security:** Evaluate the system's privacy implications and data security measures.\n", + "5. **Determine Accountability Mechanisms:** Establish clear lines of responsibility and accountability for the system's performance.\n", + "6. **Engage Stakeholders in Dialogue:** Involve stakeholders in dialogue about the ethical implications of the system.\n", + "7. **Monitor and Evaluate:** Continuously monitor and evaluate the system's performance and ethical implications.\n", + "\n", + "In conclusion, using AI in predictive policing presents a complex web of ethical challenges. A responsible approach requires careful consideration of bias, accountability, and societal impact, along with proactive measures to mitigate risks and ensure fairness. Transparency, community engagement, and ongoing evaluation are essential to ensure that these systems are used in a way that promotes justice and protects civil liberties. Failure to do so risks perpetuating and amplifying existing inequalities within the criminal justice system.\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "gemini = OpenAI(api_key=google_api_key, base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\")\n", + "model_name = \"gemini-2.0-flash\"\n", + "\n", + "response = gemini.chat.completions.create(model=model_name, messages=messages)\n", + "answer = response.choices[0].message.content\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "The use of artificial intelligence (AI) in **predictive policing** raises significant ethical concerns, particularly regarding **bias, accountability, and societal impact**. Below is a structured analysis of these implications:\n", + "\n", + "### **1. Bias in AI and Predictive Policing** \n", + "- **Data Bias**: Predictive policing relies on historical crime data, which may reflect **systemic biases** (e.g., over-policing in minority communities). If AI models are trained on biased data, they may perpetuate or even amplify discrimination. \n", + "- **Algorithmic Bias**: Machine learning models may reinforce **racial, socioeconomic, or geographic disparities** if not carefully audited. 
For example, facial recognition has been shown to misidentify people of color more frequently. \n", + "- **Feedback Loops**: If police are directed to patrol areas flagged by AI, they may record more crimes there, reinforcing the system’s bias in a self-fulfilling cycle. \n", + "\n", + "### **2. Accountability and Transparency** \n", + "- **Black Box Problem**: Many AI models (e.g., deep learning) are opaque, making it difficult to explain why certain predictions are made. This lack of transparency challenges **due process** and **legal accountability**. \n", + "- **Responsibility Gaps**: If an AI system leads to wrongful arrests or excessive policing, who is accountable—the developers, law enforcement, or policymakers? Clear **legal frameworks** are needed to assign liability. \n", + "- **Public Oversight**: Predictive policing tools are often proprietary, limiting public scrutiny. Ethical AI requires **auditability** and **community input** to prevent misuse. \n", + "\n", + "### **3. Societal Impact** \n", + "- **Erosion of Trust**: Over-reliance on AI may deepen distrust between law enforcement and marginalized communities, particularly if policing becomes more **automated and less human-judgment-based**. \n", + "- **Privacy Concerns**: Predictive policing often involves **mass surveillance** (e.g., facial recognition, social media monitoring), raising concerns about **civil liberties** and **government overreach**. \n", + "- **Reinforcement of Structural Inequities**: If AI disproportionately targets disadvantaged groups, it could worsen **social inequality** rather than reduce crime. \n", + "\n", + "### **Ethical Frameworks to Consider** \n", + "- **Fairness**: AI models should be rigorously tested for **disparate impact** and adjusted to minimize bias. \n", + "- **Transparency**: Policymakers should mandate **explainable AI** and public reporting on predictive policing outcomes. \n", + "- **Human Oversight**: AI should **assist**, not replace, human judgment in policing decisions. \n", + "- **Community Engagement**: Affected populations should have a say in whether and how predictive policing is deployed. \n", + "\n", + "### **Conclusion** \n", + "While AI in predictive policing has potential benefits (e.g., efficient resource allocation), its ethical risks—particularly bias, lack of accountability, and societal harm—demand **strict regulation, oversight, and continuous ethical evaluation**. A **human rights-centered approach** is essential to ensure AI serves justice rather than injustice. \n", + "\n", + "Would you like recommendations for mitigating these risks?" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "deepseek = OpenAI(api_key=deepseek_api_key, base_url=\"https://api.deepseek.com/v1\")\n", + "model_name = \"deepseek-chat\"\n", + "\n", + "response = deepseek.chat.completions.create(model=model_name, messages=messages)\n", + "answer = response.choices[0].message.content\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves considering multiple factors, including bias, accountability, and societal impact. Here's a comprehensive analysis of the ethical implications:\n", + "\n", + "**Bias:**\n", + "\n", + "1. 
**Data quality and selection:** AI algorithms rely on historical crime data, which may reflect existing biases in policing practices, such as racial profiling. If the data is biased, the AI system will learn and replicate these biases, leading to discriminatory policing.\n", + "2. **Algorithmic bias:** AI algorithms can perpetuate and amplify existing biases if they are not designed to account for them. For example, if an algorithm is trained on data that overrepresents certain demographic groups, it may be more likely to predict crime in those areas.\n", + "3. **Lack of transparency:** Complex AI algorithms can be difficult to interpret, making it challenging to identify and address biases.\n", + "\n", + "**Accountability:**\n", + "\n", + "1. **Lack of human oversight:** AI-driven predictive policing may lead to decisions being made without human oversight, reducing accountability and increasing the risk of errors or biases.\n", + "2. **Automated decision-making:** AI systems may make decisions based on complex algorithms, making it difficult to identify who is responsible for those decisions.\n", + "3. **Audit trails:** Maintaining audit trails and logs of AI-driven decisions is crucial to ensure accountability and transparency.\n", + "\n", + "**Societal Impact:**\n", + "\n", + "1. **Stigma and marginalization:** Predictive policing may lead to increased surveillance and targeting of specific communities, exacerbating existing social and economic disparities.\n", + "2. **Disproportionate impact on vulnerable groups:** AI-driven predictive policing may disproportionately affect vulnerable groups, such as low-income communities, racial minorities, or those with mental health issues.\n", + "3. **Community trust and legitimacy:** The use of AI in predictive policing may erode community trust in law enforcement, particularly if the technology is perceived as biased or unaccountable.\n", + "\n", + "**Additional Considerations:**\n", + "\n", + "1. **Transparency and explainability:** Ensuring that AI-driven predictive policing is transparent, explainable, and interpretable is crucial to build trust and accountability.\n", + "2. **Human rights and due process:** AI-driven predictive policing must be designed to respect human rights and due process, including the right to privacy, freedom from discrimination, and the right to a fair trial.\n", + "3. **Regulatory frameworks:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing is essential to ensure that the technology is used responsibly and ethically.\n", + "\n", + "**Mitigation Strategies:**\n", + "\n", + "1. **Data quality and validation:** Ensuring that data is accurate, complete, and unbiased is crucial to developing reliable AI systems.\n", + "2. **Algorithmic auditing and testing:** Regularly auditing and testing AI algorithms for bias and errors can help identify and address potential issues.\n", + "3. **Human oversight and review:** Implementing human oversight and review processes can help detect and correct errors or biases in AI-driven decisions.\n", + "4. **Community engagement and participation:** Engaging with communities and incorporating their concerns and feedback into the development and deployment of AI-driven predictive policing can help build trust and legitimacy.\n", + "5. 
**Regulatory frameworks and guidelines:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing can help ensure that the technology is used responsibly and ethically.\n", + "\n", + "In conclusion, analyzing the ethical implications of using AI in predictive policing requires careful consideration of factors such as bias, accountability, and societal impact. By acknowledging these challenges and implementing mitigation strategies, law enforcement agencies can ensure that AI-driven predictive policing is used responsibly and ethically, promoting fairness, transparency, and accountability." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "groq = OpenAI(api_key=groq_api_key, base_url=\"https://api.groq.com/openai/v1\")\n", + "model_name = \"llama-3.3-70b-versatile\"\n", + "\n", + "response = groq.chat.completions.create(model=model_name, messages=messages)\n", + "answer = response.choices[0].message.content\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## For the next cell, we will use Ollama\n", + "\n", + "Ollama runs a local web service that gives an OpenAI compatible endpoint, \n", + "and runs models locally using high performance C++ code.\n", + "\n", + "If you don't have Ollama, install it here by visiting https://ollama.com then pressing Download and following the instructions.\n", + "\n", + "After it's installed, you should be able to visit here: http://localhost:11434 and see the message \"Ollama is running\"\n", + "\n", + "You might need to restart Cursor (and maybe reboot). Then open a Terminal (control+\\`) and run `ollama serve`\n", + "\n", + "Useful Ollama commands (run these in the terminal, or with an exclamation mark in this notebook):\n", + "\n", + "`ollama pull ` downloads a model locally \n", + "`ollama ls` lists all the models you've downloaded \n", + "`ollama rm ` deletes the specified model from your downloads" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Super important - ignore me at your peril!

\n", + " The model called llama3.3 is FAR too large for home computers - it's not intended for personal computing and will consume all your resources! Stick with the nicely sized llama3.2 or llama3.2:1b and if you want larger, try llama3.1 or smaller variants of Qwen, Gemma, Phi or DeepSeek. See the the Ollama models page for a full list of models and sizes.\n", + " \n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠙ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n", + "pulling dde5aa3fc5ff... 100% ▕████████████████▏ 2.0 GB \u001b[K\n", + "pulling 966de95ca8a6... 100% ▕████████████████▏ 1.4 KB \u001b[K\n", + "pulling fcc5a6bec9da... 100% ▕████████████████▏ 7.7 KB \u001b[K\n", + "pulling a70ff7e570d9... 100% ▕████████████████▏ 6.0 KB \u001b[K\n", + "pulling 56bb8bd477a5... 100% ▕████████████████▏ 96 B \u001b[K\n", + "pulling 34bb5ab01051... 100% ▕████████████████▏ 561 B \u001b[K\n", + "verifying sha256 digest \u001b[K\n", + "writing manifest \u001b[K\n", + "success \u001b[K\u001b[?25h\u001b[?2026l\n" + ] + } + ], + "source": [ + "!ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires consideration of several key factors, including:\n", + "\n", + "1. **Bias**: AI algorithms can perpetuate existing biases in policing if they are trained on biased data sets. This can result in discriminatory practices, such as targeting certain communities or demographics.\n", + "2. **Accountability**: Predictive policing relies heavily on algorithms that may not be transparent or explainable. This lack of accountability raises concerns about the responsibility of policymakers and law enforcement officials for the decisions made by these systems.\n", + "3. **Societal impact**: The widespread use of predictive policing could exacerbate existing social inequalities, as it may lead to increased surveillance, harassment, and marginalization of already vulnerable populations.\n", + "\n", + "To address these ethical concerns, consider the following steps:\n", + "\n", + "1. **Data audits**: Conduct regular audits of data used to train AI algorithms, ensuring that they are diverse and representative of all communities.\n", + "2. **Algorithmic transparency**: Implement measures to provide transparent explanations for AI-driven decisions, enabling scrutiny and assessment.\n", + "3." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "model_name = \"llama3.2\"\n", + "\n", + "response = ollama.chat.completions.create(model=model_name, messages=messages)\n", + "answer = response.choices[0].message.content\n", + "\n", + "display(Markdown(answer))\n", + "competitors.append(model_name)\n", + "answers.append(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['gpt-4o-mini', 'claude-3-7-sonnet-latest', 'gemini-2.0-flash', 'deepseek-chat', 'llama-3.3-70b-versatile', 'llama3.2']\n", + "[\"Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves a multidimensional approach, considering several key factors such as bias, accountability, and societal impact. Here is a structured framework for this analysis:\\n\\n### 1. 
**Bias**\\n- **Data Bias**: AI systems rely heavily on historical data, which can perpetuate existing biases present in that data. If the data reflects racial or socioeconomic disparities in policing (e.g., over-policing in certain communities), the AI can reinforce these biases by predicting higher crime rates in these areas, leading to a self-fulfilling prophecy.\\n- **Algorithmic Bias**: The design of algorithms may also introduce biases if the developers unconsciously embed their own biases into the model. It's crucial to examine who created the algorithms and what assumptions were made during their development.\\n- **Mitigation Strategies**: Implementing techniques like algorithmic fairness audits and diverse training datasets can help reduce bias. Regularly assessing and adjusting algorithms as new data comes in can also be important.\\n\\n### 2. **Accountability**\\n- **Responsibility for Decisions**: If an AI system makes a predictive error that leads to wrongful arrest or civil liberties violations, determining accountability becomes complex. Clear lines of responsibility must be established—who is to blame: the algorithm developers, the law enforcement agency, or the policymakers who implemented the AI?\\n- **Transparency**: The opacity of many AI systems can hinder accountability. Stakeholders should demand transparency regarding how algorithms work and what data they use. This includes clear reporting on algorithmic decisions and their outcomes.\\n- **Human Oversight**: AI in predictive policing should not replace human judgment. Maintaining a system of human oversight can ensure that critical decisions, especially those impacting civil rights, are vetted through human interpretation and ethical considerations.\\n\\n### 3. **Societal Impact**\\n- **Civil Liberties**: The use of predictive policing can infringe on individuals' rights if it leads to excessive surveillance or profiling based on prediction rather than behavior. There's a fine line between preventive measures and civil rights violations.\\n- **Community Trust**: The deployment of AI in policing can affect community relationships with law enforcement. If communities perceive predictive policing as biased or unfair, it may lead to mistrust and decreased cooperation with law enforcement efforts.\\n- **Resource Allocation**: AI may skew resource allocation towards heavily policed communities, potentially exacerbating existing inequalities and diverting resources away from crime prevention and community investment in areas that may genuinely need them.\\n\\n### 4. **Ethical Frameworks**\\n- **Utilitarian Perspectives**: While predictive policing can potentially reduce crime detection and prevention, ethical evaluation must consider broader societal impacts, including the potential harm to affected communities.\\n- **Deontological Perspectives**: From a rights-based view, predictive policing must respect individual rights and freedoms, ensuring that law enforcement practices do not compromise the dignity and autonomy of individuals.\\n\\n### 5. **Public Engagement and Policy**\\n- **Community Consultation**: Engaging the community in discussions about the use of AI in policing can help bridge gaps and increase transparency. 
Public forums can provide a platform for feedback and concerns regarding predictive technologies.\\n- **Legislative Oversight**: Policymakers need to establish robust regulatory frameworks governing the use of predictive policing tools to safeguard civil liberties and ensure accountability and transparency throughout the process.\\n\\n### Conclusion\\nThe ethical implications of using AI in predictive policing are complex and multifaceted. A careful, considerate approach that addresses bias, ensures accountability, and considers the broader societal impacts is critical for navigating the challenges posed by these technologies. Implementing safeguards and engaging with communities can help harness the benefits of AI while minimizing its harms.\", '# Ethics of AI in Predictive Policing\\n\\nPredictive policing using AI presents several significant ethical challenges:\\n\\n## Bias Concerns\\n- Historical police data often contains embedded biases against marginalized communities\\n- Algorithms may perpetuate or amplify these biases, creating harmful feedback loops\\n- Risk of technological laundering where human bias is hidden behind a veneer of algorithmic objectivity\\n\\n## Accountability Issues\\n- \"Black box\" algorithms create difficulties in understanding how predictions are generated\\n- Unclear responsibility chains between developers, police departments, and officers\\n- Questions about legal recourse for citizens wrongfully targeted\\n\\n## Societal Impact\\n- Potential erosion of presumption of innocence by targeting individuals based on statistical likelihood\\n- Risk of creating over-policed communities, reinforcing existing social inequalities\\n- Privacy implications of mass data collection and algorithmic surveillance\\n\\nEthical implementation would require transparent algorithms, diverse training data, human oversight, regular auditing for bias, and community involvement in deployment decisions. The core question remains whether we can balance potential public safety benefits against risks to civil liberties and equal protection.', 'Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires a multi-faceted approach, considering not only the technology itself but also the complex social context in which it is deployed. Here\\'s a breakdown of the key factors:\\n\\n**1. Bias:**\\n\\n* **Source Data Bias:** AI models are trained on historical data, which often reflects existing biases within the criminal justice system. If past policing practices disproportionately targeted certain communities (e.g., due to racial profiling, socioeconomic disparities), the AI will learn and perpetuate these biases. 
This can lead to:\\n * **Reinforcement of Existing Inequalities:** Predictive policing may reinforce discriminatory practices by disproportionately focusing resources on marginalized communities, leading to more arrests, and further skewing the data the AI is trained on, creating a self-fulfilling prophecy.\\n * **Bias Amplification:** AI algorithms can amplify subtle biases in the data that might be difficult for humans to detect, leading to even more discriminatory outcomes.\\n * **Example:** If a system is trained on arrest data showing higher drug crime rates in a specific neighborhood, it may predict higher crime rates in that neighborhood even if the underlying reason is simply increased police presence and enforcement in that area.\\n\\n* **Algorithmic Bias:** Even with seemingly unbiased data, bias can creep into the algorithm itself during the design and development phase. This can be due to:\\n * **Feature Selection:** Choosing specific variables to predict crime may inadvertently correlate with protected characteristics (e.g., race, ethnicity, socioeconomic status).\\n * **Model Design:** Certain algorithms might inherently be more prone to bias than others.\\n * **Thresholds and Cutoffs:** Setting thresholds for risk scores or predicted crime rates can have disproportionate impacts on different groups.\\n\\n* **Mitigation Strategies:**\\n * **Data Auditing and Cleaning:** Thoroughly examine and address biases in the training data. Consider oversampling underrepresented groups or using techniques to de-bias the data.\\n * **Algorithmic Auditing:** Regularly audit the algorithm\\'s performance to identify and correct for bias. Use metrics beyond overall accuracy, focusing on fairness metrics (e.g., equal opportunity, predictive parity, calibration).\\n * **Transparency and Explainability:** Ensure that the AI\\'s decision-making process is transparent and explainable to stakeholders, including law enforcement, policymakers, and the public. This allows for scrutiny and identification of potential biases.\\n\\n**2. Accountability:**\\n\\n* **Who is responsible when the AI makes a mistake?** Determining accountability is crucial. If an AI predicts a crime and leads to a wrongful arrest, who is held responsible: the software developer, the police department, the officer who acted on the prediction, or the AI itself (which is not a legal entity)?\\n* **Lack of Transparency:** \"Black box\" algorithms can make it difficult to understand how a prediction was made, making it challenging to hold anyone accountable for errors or biased outcomes.\\n* **Due Process Concerns:** Relying heavily on AI predictions can potentially undermine due process rights, as individuals may be targeted based on statistical probabilities rather than individual suspicion.\\n* **Mitigation Strategies:**\\n * **Clear Lines of Responsibility:** Establish clear lines of responsibility for the development, deployment, and use of predictive policing AI.\\n * **Human Oversight:** Implement robust human oversight mechanisms to ensure that AI predictions are not blindly followed but are carefully reviewed and validated by human officers.\\n * **Explainable AI (XAI):** Develop and deploy AI systems that provide explanations for their predictions, allowing for human review and scrutiny.\\n * **Independent Audits:** Conduct regular independent audits of predictive policing systems to assess their accuracy, fairness, and adherence to ethical guidelines.\\n\\n**3. 
Societal Impact:**\\n\\n* **Erosion of Trust:** If predictive policing systems are perceived as unfair or discriminatory, they can erode trust between law enforcement and the communities they serve.\\n* **Privacy Concerns:** Predictive policing systems often rely on the collection and analysis of large amounts of personal data, raising significant privacy concerns. The data used could include arrest records, social media activity, location data, and other sensitive information.\\n* **Chilling Effect on Civil Liberties:** The widespread use of predictive policing could have a chilling effect on civil liberties, as individuals may be less likely to engage in lawful activities if they fear being targeted by law enforcement based on AI predictions.\\n* **Displacement of Crime:** Predictive policing might simply displace crime to other areas, rather than addressing the root causes of crime.\\n* **Mitigation Strategies:**\\n * **Community Engagement:** Involve community members in the design, implementation, and oversight of predictive policing systems.\\n * **Data Minimization and Privacy Protection:** Collect and use only the data that is strictly necessary for predictive policing purposes and implement strong data security and privacy protections.\\n * **Transparency and Public Education:** Be transparent about how predictive policing systems work and how they are being used. Educate the public about the risks and benefits of these systems.\\n * **Focus on Root Causes of Crime:** Invest in programs that address the root causes of crime, such as poverty, inequality, and lack of opportunity. Predictive policing should not be seen as a substitute for addressing these underlying issues.\\n\\n**4. Alternatives and Trade-offs:**\\n\\n* **Consider non-AI solutions:** Before implementing AI-based predictive policing, explore alternative strategies, such as community policing, problem-oriented policing, and focused deterrence, which may be more effective and less ethically problematic.\\n* **Weigh the potential benefits against the risks:** Carefully weigh the potential benefits of predictive policing (e.g., crime reduction, improved resource allocation) against the risks of bias, accountability issues, and societal harm. Ensure that the benefits outweigh the risks.\\n\\n**Framework for Ethical Assessment:**\\n\\nA robust ethical assessment should involve the following steps:\\n\\n1. **Identify Stakeholders:** Determine who will be affected by the use of predictive policing (e.g., law enforcement, communities, individuals).\\n2. **Map Potential Harms and Benefits:** Identify the potential harms and benefits of the system for each stakeholder group.\\n3. **Evaluate Fairness and Equity:** Assess whether the system is fair and equitable to all stakeholders.\\n4. **Consider Privacy and Data Security:** Evaluate the system\\'s privacy implications and data security measures.\\n5. **Determine Accountability Mechanisms:** Establish clear lines of responsibility and accountability for the system\\'s performance.\\n6. **Engage Stakeholders in Dialogue:** Involve stakeholders in dialogue about the ethical implications of the system.\\n7. **Monitor and Evaluate:** Continuously monitor and evaluate the system\\'s performance and ethical implications.\\n\\nIn conclusion, using AI in predictive policing presents a complex web of ethical challenges. A responsible approach requires careful consideration of bias, accountability, and societal impact, along with proactive measures to mitigate risks and ensure fairness. 
Transparency, community engagement, and ongoing evaluation are essential to ensure that these systems are used in a way that promotes justice and protects civil liberties. Failure to do so risks perpetuating and amplifying existing inequalities within the criminal justice system.\\n', 'The use of artificial intelligence (AI) in **predictive policing** raises significant ethical concerns, particularly regarding **bias, accountability, and societal impact**. Below is a structured analysis of these implications:\\n\\n### **1. Bias in AI and Predictive Policing** \\n- **Data Bias**: Predictive policing relies on historical crime data, which may reflect **systemic biases** (e.g., over-policing in minority communities). If AI models are trained on biased data, they may perpetuate or even amplify discrimination. \\n- **Algorithmic Bias**: Machine learning models may reinforce **racial, socioeconomic, or geographic disparities** if not carefully audited. For example, facial recognition has been shown to misidentify people of color more frequently. \\n- **Feedback Loops**: If police are directed to patrol areas flagged by AI, they may record more crimes there, reinforcing the system’s bias in a self-fulfilling cycle. \\n\\n### **2. Accountability and Transparency** \\n- **Black Box Problem**: Many AI models (e.g., deep learning) are opaque, making it difficult to explain why certain predictions are made. This lack of transparency challenges **due process** and **legal accountability**. \\n- **Responsibility Gaps**: If an AI system leads to wrongful arrests or excessive policing, who is accountable—the developers, law enforcement, or policymakers? Clear **legal frameworks** are needed to assign liability. \\n- **Public Oversight**: Predictive policing tools are often proprietary, limiting public scrutiny. Ethical AI requires **auditability** and **community input** to prevent misuse. \\n\\n### **3. Societal Impact** \\n- **Erosion of Trust**: Over-reliance on AI may deepen distrust between law enforcement and marginalized communities, particularly if policing becomes more **automated and less human-judgment-based**. \\n- **Privacy Concerns**: Predictive policing often involves **mass surveillance** (e.g., facial recognition, social media monitoring), raising concerns about **civil liberties** and **government overreach**. \\n- **Reinforcement of Structural Inequities**: If AI disproportionately targets disadvantaged groups, it could worsen **social inequality** rather than reduce crime. \\n\\n### **Ethical Frameworks to Consider** \\n- **Fairness**: AI models should be rigorously tested for **disparate impact** and adjusted to minimize bias. \\n- **Transparency**: Policymakers should mandate **explainable AI** and public reporting on predictive policing outcomes. \\n- **Human Oversight**: AI should **assist**, not replace, human judgment in policing decisions. \\n- **Community Engagement**: Affected populations should have a say in whether and how predictive policing is deployed. \\n\\n### **Conclusion** \\nWhile AI in predictive policing has potential benefits (e.g., efficient resource allocation), its ethical risks—particularly bias, lack of accountability, and societal harm—demand **strict regulation, oversight, and continuous ethical evaluation**. A **human rights-centered approach** is essential to ensure AI serves justice rather than injustice. 
\\n\\nWould you like recommendations for mitigating these risks?', \"Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves considering multiple factors, including bias, accountability, and societal impact. Here's a comprehensive analysis of the ethical implications:\\n\\n**Bias:**\\n\\n1. **Data quality and selection:** AI algorithms rely on historical crime data, which may reflect existing biases in policing practices, such as racial profiling. If the data is biased, the AI system will learn and replicate these biases, leading to discriminatory policing.\\n2. **Algorithmic bias:** AI algorithms can perpetuate and amplify existing biases if they are not designed to account for them. For example, if an algorithm is trained on data that overrepresents certain demographic groups, it may be more likely to predict crime in those areas.\\n3. **Lack of transparency:** Complex AI algorithms can be difficult to interpret, making it challenging to identify and address biases.\\n\\n**Accountability:**\\n\\n1. **Lack of human oversight:** AI-driven predictive policing may lead to decisions being made without human oversight, reducing accountability and increasing the risk of errors or biases.\\n2. **Automated decision-making:** AI systems may make decisions based on complex algorithms, making it difficult to identify who is responsible for those decisions.\\n3. **Audit trails:** Maintaining audit trails and logs of AI-driven decisions is crucial to ensure accountability and transparency.\\n\\n**Societal Impact:**\\n\\n1. **Stigma and marginalization:** Predictive policing may lead to increased surveillance and targeting of specific communities, exacerbating existing social and economic disparities.\\n2. **Disproportionate impact on vulnerable groups:** AI-driven predictive policing may disproportionately affect vulnerable groups, such as low-income communities, racial minorities, or those with mental health issues.\\n3. **Community trust and legitimacy:** The use of AI in predictive policing may erode community trust in law enforcement, particularly if the technology is perceived as biased or unaccountable.\\n\\n**Additional Considerations:**\\n\\n1. **Transparency and explainability:** Ensuring that AI-driven predictive policing is transparent, explainable, and interpretable is crucial to build trust and accountability.\\n2. **Human rights and due process:** AI-driven predictive policing must be designed to respect human rights and due process, including the right to privacy, freedom from discrimination, and the right to a fair trial.\\n3. **Regulatory frameworks:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing is essential to ensure that the technology is used responsibly and ethically.\\n\\n**Mitigation Strategies:**\\n\\n1. **Data quality and validation:** Ensuring that data is accurate, complete, and unbiased is crucial to developing reliable AI systems.\\n2. **Algorithmic auditing and testing:** Regularly auditing and testing AI algorithms for bias and errors can help identify and address potential issues.\\n3. **Human oversight and review:** Implementing human oversight and review processes can help detect and correct errors or biases in AI-driven decisions.\\n4. **Community engagement and participation:** Engaging with communities and incorporating their concerns and feedback into the development and deployment of AI-driven predictive policing can help build trust and legitimacy.\\n5. 
**Regulatory frameworks and guidelines:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing can help ensure that the technology is used responsibly and ethically.\\n\\nIn conclusion, analyzing the ethical implications of using AI in predictive policing requires careful consideration of factors such as bias, accountability, and societal impact. By acknowledging these challenges and implementing mitigation strategies, law enforcement agencies can ensure that AI-driven predictive policing is used responsibly and ethically, promoting fairness, transparency, and accountability.\", 'Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires consideration of several key factors, including:\\n\\n1. **Bias**: AI algorithms can perpetuate existing biases in policing if they are trained on biased data sets. This can result in discriminatory practices, such as targeting certain communities or demographics.\\n2. **Accountability**: Predictive policing relies heavily on algorithms that may not be transparent or explainable. This lack of accountability raises concerns about the responsibility of policymakers and law enforcement officials for the decisions made by these systems.\\n3. **Societal impact**: The widespread use of predictive policing could exacerbate existing social inequalities, as it may lead to increased surveillance, harassment, and marginalization of already vulnerable populations.\\n\\nTo address these ethical concerns, consider the following steps:\\n\\n1. **Data audits**: Conduct regular audits of data used to train AI algorithms, ensuring that they are diverse and representative of all communities.\\n2. **Algorithmic transparency**: Implement measures to provide transparent explanations for AI-driven decisions, enabling scrutiny and assessment.\\n3.']\n" + ] + } + ], + "source": [ + "# So where are we?\n", + "\n", + "print(competitors)\n", + "print(answers)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Competitor: gpt-4o-mini\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves a multidimensional approach, considering several key factors such as bias, accountability, and societal impact. Here is a structured framework for this analysis:\n", + "\n", + "### 1. **Bias**\n", + "- **Data Bias**: AI systems rely heavily on historical data, which can perpetuate existing biases present in that data. If the data reflects racial or socioeconomic disparities in policing (e.g., over-policing in certain communities), the AI can reinforce these biases by predicting higher crime rates in these areas, leading to a self-fulfilling prophecy.\n", + "- **Algorithmic Bias**: The design of algorithms may also introduce biases if the developers unconsciously embed their own biases into the model. It's crucial to examine who created the algorithms and what assumptions were made during their development.\n", + "- **Mitigation Strategies**: Implementing techniques like algorithmic fairness audits and diverse training datasets can help reduce bias. Regularly assessing and adjusting algorithms as new data comes in can also be important.\n", + "\n", + "### 2. 
**Accountability**\n", + "- **Responsibility for Decisions**: If an AI system makes a predictive error that leads to wrongful arrest or civil liberties violations, determining accountability becomes complex. Clear lines of responsibility must be established—who is to blame: the algorithm developers, the law enforcement agency, or the policymakers who implemented the AI?\n", + "- **Transparency**: The opacity of many AI systems can hinder accountability. Stakeholders should demand transparency regarding how algorithms work and what data they use. This includes clear reporting on algorithmic decisions and their outcomes.\n", + "- **Human Oversight**: AI in predictive policing should not replace human judgment. Maintaining a system of human oversight can ensure that critical decisions, especially those impacting civil rights, are vetted through human interpretation and ethical considerations.\n", + "\n", + "### 3. **Societal Impact**\n", + "- **Civil Liberties**: The use of predictive policing can infringe on individuals' rights if it leads to excessive surveillance or profiling based on prediction rather than behavior. There's a fine line between preventive measures and civil rights violations.\n", + "- **Community Trust**: The deployment of AI in policing can affect community relationships with law enforcement. If communities perceive predictive policing as biased or unfair, it may lead to mistrust and decreased cooperation with law enforcement efforts.\n", + "- **Resource Allocation**: AI may skew resource allocation towards heavily policed communities, potentially exacerbating existing inequalities and diverting resources away from crime prevention and community investment in areas that may genuinely need them.\n", + "\n", + "### 4. **Ethical Frameworks**\n", + "- **Utilitarian Perspectives**: While predictive policing can potentially reduce crime detection and prevention, ethical evaluation must consider broader societal impacts, including the potential harm to affected communities.\n", + "- **Deontological Perspectives**: From a rights-based view, predictive policing must respect individual rights and freedoms, ensuring that law enforcement practices do not compromise the dignity and autonomy of individuals.\n", + "\n", + "### 5. **Public Engagement and Policy**\n", + "- **Community Consultation**: Engaging the community in discussions about the use of AI in policing can help bridge gaps and increase transparency. Public forums can provide a platform for feedback and concerns regarding predictive technologies.\n", + "- **Legislative Oversight**: Policymakers need to establish robust regulatory frameworks governing the use of predictive policing tools to safeguard civil liberties and ensure accountability and transparency throughout the process.\n", + "\n", + "### Conclusion\n", + "The ethical implications of using AI in predictive policing are complex and multifaceted. A careful, considerate approach that addresses bias, ensures accountability, and considers the broader societal impacts is critical for navigating the challenges posed by these technologies. 
Implementing safeguards and engaging with communities can help harness the benefits of AI while minimizing its harms.\n", + "Competitor: claude-3-7-sonnet-latest\n", + "\n", + "# Ethics of AI in Predictive Policing\n", + "\n", + "Predictive policing using AI presents several significant ethical challenges:\n", + "\n", + "## Bias Concerns\n", + "- Historical police data often contains embedded biases against marginalized communities\n", + "- Algorithms may perpetuate or amplify these biases, creating harmful feedback loops\n", + "- Risk of technological laundering where human bias is hidden behind a veneer of algorithmic objectivity\n", + "\n", + "## Accountability Issues\n", + "- \"Black box\" algorithms create difficulties in understanding how predictions are generated\n", + "- Unclear responsibility chains between developers, police departments, and officers\n", + "- Questions about legal recourse for citizens wrongfully targeted\n", + "\n", + "## Societal Impact\n", + "- Potential erosion of presumption of innocence by targeting individuals based on statistical likelihood\n", + "- Risk of creating over-policed communities, reinforcing existing social inequalities\n", + "- Privacy implications of mass data collection and algorithmic surveillance\n", + "\n", + "Ethical implementation would require transparent algorithms, diverse training data, human oversight, regular auditing for bias, and community involvement in deployment decisions. The core question remains whether we can balance potential public safety benefits against risks to civil liberties and equal protection.\n", + "Competitor: gemini-2.0-flash\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires a multi-faceted approach, considering not only the technology itself but also the complex social context in which it is deployed. Here's a breakdown of the key factors:\n", + "\n", + "**1. Bias:**\n", + "\n", + "* **Source Data Bias:** AI models are trained on historical data, which often reflects existing biases within the criminal justice system. If past policing practices disproportionately targeted certain communities (e.g., due to racial profiling, socioeconomic disparities), the AI will learn and perpetuate these biases. This can lead to:\n", + " * **Reinforcement of Existing Inequalities:** Predictive policing may reinforce discriminatory practices by disproportionately focusing resources on marginalized communities, leading to more arrests, and further skewing the data the AI is trained on, creating a self-fulfilling prophecy.\n", + " * **Bias Amplification:** AI algorithms can amplify subtle biases in the data that might be difficult for humans to detect, leading to even more discriminatory outcomes.\n", + " * **Example:** If a system is trained on arrest data showing higher drug crime rates in a specific neighborhood, it may predict higher crime rates in that neighborhood even if the underlying reason is simply increased police presence and enforcement in that area.\n", + "\n", + "* **Algorithmic Bias:** Even with seemingly unbiased data, bias can creep into the algorithm itself during the design and development phase. 
This can be due to:\n", + " * **Feature Selection:** Choosing specific variables to predict crime may inadvertently correlate with protected characteristics (e.g., race, ethnicity, socioeconomic status).\n", + " * **Model Design:** Certain algorithms might inherently be more prone to bias than others.\n", + " * **Thresholds and Cutoffs:** Setting thresholds for risk scores or predicted crime rates can have disproportionate impacts on different groups.\n", + "\n", + "* **Mitigation Strategies:**\n", + " * **Data Auditing and Cleaning:** Thoroughly examine and address biases in the training data. Consider oversampling underrepresented groups or using techniques to de-bias the data.\n", + " * **Algorithmic Auditing:** Regularly audit the algorithm's performance to identify and correct for bias. Use metrics beyond overall accuracy, focusing on fairness metrics (e.g., equal opportunity, predictive parity, calibration).\n", + " * **Transparency and Explainability:** Ensure that the AI's decision-making process is transparent and explainable to stakeholders, including law enforcement, policymakers, and the public. This allows for scrutiny and identification of potential biases.\n", + "\n", + "**2. Accountability:**\n", + "\n", + "* **Who is responsible when the AI makes a mistake?** Determining accountability is crucial. If an AI predicts a crime and leads to a wrongful arrest, who is held responsible: the software developer, the police department, the officer who acted on the prediction, or the AI itself (which is not a legal entity)?\n", + "* **Lack of Transparency:** \"Black box\" algorithms can make it difficult to understand how a prediction was made, making it challenging to hold anyone accountable for errors or biased outcomes.\n", + "* **Due Process Concerns:** Relying heavily on AI predictions can potentially undermine due process rights, as individuals may be targeted based on statistical probabilities rather than individual suspicion.\n", + "* **Mitigation Strategies:**\n", + " * **Clear Lines of Responsibility:** Establish clear lines of responsibility for the development, deployment, and use of predictive policing AI.\n", + " * **Human Oversight:** Implement robust human oversight mechanisms to ensure that AI predictions are not blindly followed but are carefully reviewed and validated by human officers.\n", + " * **Explainable AI (XAI):** Develop and deploy AI systems that provide explanations for their predictions, allowing for human review and scrutiny.\n", + " * **Independent Audits:** Conduct regular independent audits of predictive policing systems to assess their accuracy, fairness, and adherence to ethical guidelines.\n", + "\n", + "**3. Societal Impact:**\n", + "\n", + "* **Erosion of Trust:** If predictive policing systems are perceived as unfair or discriminatory, they can erode trust between law enforcement and the communities they serve.\n", + "* **Privacy Concerns:** Predictive policing systems often rely on the collection and analysis of large amounts of personal data, raising significant privacy concerns. 
The data used could include arrest records, social media activity, location data, and other sensitive information.\n", + "* **Chilling Effect on Civil Liberties:** The widespread use of predictive policing could have a chilling effect on civil liberties, as individuals may be less likely to engage in lawful activities if they fear being targeted by law enforcement based on AI predictions.\n", + "* **Displacement of Crime:** Predictive policing might simply displace crime to other areas, rather than addressing the root causes of crime.\n", + "* **Mitigation Strategies:**\n", + " * **Community Engagement:** Involve community members in the design, implementation, and oversight of predictive policing systems.\n", + " * **Data Minimization and Privacy Protection:** Collect and use only the data that is strictly necessary for predictive policing purposes and implement strong data security and privacy protections.\n", + " * **Transparency and Public Education:** Be transparent about how predictive policing systems work and how they are being used. Educate the public about the risks and benefits of these systems.\n", + " * **Focus on Root Causes of Crime:** Invest in programs that address the root causes of crime, such as poverty, inequality, and lack of opportunity. Predictive policing should not be seen as a substitute for addressing these underlying issues.\n", + "\n", + "**4. Alternatives and Trade-offs:**\n", + "\n", + "* **Consider non-AI solutions:** Before implementing AI-based predictive policing, explore alternative strategies, such as community policing, problem-oriented policing, and focused deterrence, which may be more effective and less ethically problematic.\n", + "* **Weigh the potential benefits against the risks:** Carefully weigh the potential benefits of predictive policing (e.g., crime reduction, improved resource allocation) against the risks of bias, accountability issues, and societal harm. Ensure that the benefits outweigh the risks.\n", + "\n", + "**Framework for Ethical Assessment:**\n", + "\n", + "A robust ethical assessment should involve the following steps:\n", + "\n", + "1. **Identify Stakeholders:** Determine who will be affected by the use of predictive policing (e.g., law enforcement, communities, individuals).\n", + "2. **Map Potential Harms and Benefits:** Identify the potential harms and benefits of the system for each stakeholder group.\n", + "3. **Evaluate Fairness and Equity:** Assess whether the system is fair and equitable to all stakeholders.\n", + "4. **Consider Privacy and Data Security:** Evaluate the system's privacy implications and data security measures.\n", + "5. **Determine Accountability Mechanisms:** Establish clear lines of responsibility and accountability for the system's performance.\n", + "6. **Engage Stakeholders in Dialogue:** Involve stakeholders in dialogue about the ethical implications of the system.\n", + "7. **Monitor and Evaluate:** Continuously monitor and evaluate the system's performance and ethical implications.\n", + "\n", + "In conclusion, using AI in predictive policing presents a complex web of ethical challenges. A responsible approach requires careful consideration of bias, accountability, and societal impact, along with proactive measures to mitigate risks and ensure fairness. Transparency, community engagement, and ongoing evaluation are essential to ensure that these systems are used in a way that promotes justice and protects civil liberties. 
Failure to do so risks perpetuating and amplifying existing inequalities within the criminal justice system.\n", + "\n", + "Competitor: deepseek-chat\n", + "\n", + "The use of artificial intelligence (AI) in **predictive policing** raises significant ethical concerns, particularly regarding **bias, accountability, and societal impact**. Below is a structured analysis of these implications:\n", + "\n", + "### **1. Bias in AI and Predictive Policing** \n", + "- **Data Bias**: Predictive policing relies on historical crime data, which may reflect **systemic biases** (e.g., over-policing in minority communities). If AI models are trained on biased data, they may perpetuate or even amplify discrimination. \n", + "- **Algorithmic Bias**: Machine learning models may reinforce **racial, socioeconomic, or geographic disparities** if not carefully audited. For example, facial recognition has been shown to misidentify people of color more frequently. \n", + "- **Feedback Loops**: If police are directed to patrol areas flagged by AI, they may record more crimes there, reinforcing the system’s bias in a self-fulfilling cycle. \n", + "\n", + "### **2. Accountability and Transparency** \n", + "- **Black Box Problem**: Many AI models (e.g., deep learning) are opaque, making it difficult to explain why certain predictions are made. This lack of transparency challenges **due process** and **legal accountability**. \n", + "- **Responsibility Gaps**: If an AI system leads to wrongful arrests or excessive policing, who is accountable—the developers, law enforcement, or policymakers? Clear **legal frameworks** are needed to assign liability. \n", + "- **Public Oversight**: Predictive policing tools are often proprietary, limiting public scrutiny. Ethical AI requires **auditability** and **community input** to prevent misuse. \n", + "\n", + "### **3. Societal Impact** \n", + "- **Erosion of Trust**: Over-reliance on AI may deepen distrust between law enforcement and marginalized communities, particularly if policing becomes more **automated and less human-judgment-based**. \n", + "- **Privacy Concerns**: Predictive policing often involves **mass surveillance** (e.g., facial recognition, social media monitoring), raising concerns about **civil liberties** and **government overreach**. \n", + "- **Reinforcement of Structural Inequities**: If AI disproportionately targets disadvantaged groups, it could worsen **social inequality** rather than reduce crime. \n", + "\n", + "### **Ethical Frameworks to Consider** \n", + "- **Fairness**: AI models should be rigorously tested for **disparate impact** and adjusted to minimize bias. \n", + "- **Transparency**: Policymakers should mandate **explainable AI** and public reporting on predictive policing outcomes. \n", + "- **Human Oversight**: AI should **assist**, not replace, human judgment in policing decisions. \n", + "- **Community Engagement**: Affected populations should have a say in whether and how predictive policing is deployed. \n", + "\n", + "### **Conclusion** \n", + "While AI in predictive policing has potential benefits (e.g., efficient resource allocation), its ethical risks—particularly bias, lack of accountability, and societal harm—demand **strict regulation, oversight, and continuous ethical evaluation**. A **human rights-centered approach** is essential to ensure AI serves justice rather than injustice. 
\n", + "\n", + "Would you like recommendations for mitigating these risks?\n", + "Competitor: llama-3.3-70b-versatile\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves considering multiple factors, including bias, accountability, and societal impact. Here's a comprehensive analysis of the ethical implications:\n", + "\n", + "**Bias:**\n", + "\n", + "1. **Data quality and selection:** AI algorithms rely on historical crime data, which may reflect existing biases in policing practices, such as racial profiling. If the data is biased, the AI system will learn and replicate these biases, leading to discriminatory policing.\n", + "2. **Algorithmic bias:** AI algorithms can perpetuate and amplify existing biases if they are not designed to account for them. For example, if an algorithm is trained on data that overrepresents certain demographic groups, it may be more likely to predict crime in those areas.\n", + "3. **Lack of transparency:** Complex AI algorithms can be difficult to interpret, making it challenging to identify and address biases.\n", + "\n", + "**Accountability:**\n", + "\n", + "1. **Lack of human oversight:** AI-driven predictive policing may lead to decisions being made without human oversight, reducing accountability and increasing the risk of errors or biases.\n", + "2. **Automated decision-making:** AI systems may make decisions based on complex algorithms, making it difficult to identify who is responsible for those decisions.\n", + "3. **Audit trails:** Maintaining audit trails and logs of AI-driven decisions is crucial to ensure accountability and transparency.\n", + "\n", + "**Societal Impact:**\n", + "\n", + "1. **Stigma and marginalization:** Predictive policing may lead to increased surveillance and targeting of specific communities, exacerbating existing social and economic disparities.\n", + "2. **Disproportionate impact on vulnerable groups:** AI-driven predictive policing may disproportionately affect vulnerable groups, such as low-income communities, racial minorities, or those with mental health issues.\n", + "3. **Community trust and legitimacy:** The use of AI in predictive policing may erode community trust in law enforcement, particularly if the technology is perceived as biased or unaccountable.\n", + "\n", + "**Additional Considerations:**\n", + "\n", + "1. **Transparency and explainability:** Ensuring that AI-driven predictive policing is transparent, explainable, and interpretable is crucial to build trust and accountability.\n", + "2. **Human rights and due process:** AI-driven predictive policing must be designed to respect human rights and due process, including the right to privacy, freedom from discrimination, and the right to a fair trial.\n", + "3. **Regulatory frameworks:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing is essential to ensure that the technology is used responsibly and ethically.\n", + "\n", + "**Mitigation Strategies:**\n", + "\n", + "1. **Data quality and validation:** Ensuring that data is accurate, complete, and unbiased is crucial to developing reliable AI systems.\n", + "2. **Algorithmic auditing and testing:** Regularly auditing and testing AI algorithms for bias and errors can help identify and address potential issues.\n", + "3. **Human oversight and review:** Implementing human oversight and review processes can help detect and correct errors or biases in AI-driven decisions.\n", + "4. 
**Community engagement and participation:** Engaging with communities and incorporating their concerns and feedback into the development and deployment of AI-driven predictive policing can help build trust and legitimacy.\n", + "5. **Regulatory frameworks and guidelines:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing can help ensure that the technology is used responsibly and ethically.\n", + "\n", + "In conclusion, analyzing the ethical implications of using AI in predictive policing requires careful consideration of factors such as bias, accountability, and societal impact. By acknowledging these challenges and implementing mitigation strategies, law enforcement agencies can ensure that AI-driven predictive policing is used responsibly and ethically, promoting fairness, transparency, and accountability.\n", + "Competitor: llama3.2\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires consideration of several key factors, including:\n", + "\n", + "1. **Bias**: AI algorithms can perpetuate existing biases in policing if they are trained on biased data sets. This can result in discriminatory practices, such as targeting certain communities or demographics.\n", + "2. **Accountability**: Predictive policing relies heavily on algorithms that may not be transparent or explainable. This lack of accountability raises concerns about the responsibility of policymakers and law enforcement officials for the decisions made by these systems.\n", + "3. **Societal impact**: The widespread use of predictive policing could exacerbate existing social inequalities, as it may lead to increased surveillance, harassment, and marginalization of already vulnerable populations.\n", + "\n", + "To address these ethical concerns, consider the following steps:\n", + "\n", + "1. **Data audits**: Conduct regular audits of data used to train AI algorithms, ensuring that they are diverse and representative of all communities.\n", + "2. **Algorithmic transparency**: Implement measures to provide transparent explanations for AI-driven decisions, enabling scrutiny and assessment.\n", + "3.\n" + ] + } + ], + "source": [ + "# It's nice to know how to use \"zip\" - it pairs the i-th competitor with the i-th answer\n", + "for competitor, answer in zip(competitors, answers):\n", + "    print(f\"Competitor: {competitor}\\n\\n{answer}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "# Let's bring this together - note the use of \"enumerate\", which supplies an index alongside each answer\n", + "\n", + "together = \"\"\n", + "for index, answer in enumerate(answers):\n", + "    together += f\"# Response from competitor {index+1}\\n\\n\"\n", + "    together += answer + \"\\n\\n\"" + ] + },
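+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# A tiny, purely illustrative aside on zip and enumerate, using made-up lists\n", + "# zip pairs up matching items from two lists; enumerate attaches a counting index to each item\n", + "\n", + "fruits = [\"apple\", \"banana\"]\n", + "colors = [\"red\", \"yellow\"]\n", + "\n", + "for fruit, color in zip(fruits, colors):\n", + "    print(f\"{fruit} is {color}\")\n", + "\n", + "for index, fruit in enumerate(fruits):\n", + "    print(f\"Fruit number {index+1} is {fruit}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Response from competitor 1\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves a multidimensional approach, considering several key factors such as bias, accountability, and societal impact. Here is a structured framework for this analysis:\n", + "\n", + "### 1. **Bias**\n", + "- **Data Bias**: AI systems rely heavily on historical data, which can perpetuate existing biases present in that data.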
If the data reflects racial or socioeconomic disparities in policing (e.g., over-policing in certain communities), the AI can reinforce these biases by predicting higher crime rates in these areas, leading to a self-fulfilling prophecy.\n", + "- **Algorithmic Bias**: The design of algorithms may also introduce biases if the developers unconsciously embed their own biases into the model. It's crucial to examine who created the algorithms and what assumptions were made during their development.\n", + "- **Mitigation Strategies**: Implementing techniques like algorithmic fairness audits and diverse training datasets can help reduce bias. Regularly assessing and adjusting algorithms as new data comes in can also be important.\n", + "\n", + "### 2. **Accountability**\n", + "- **Responsibility for Decisions**: If an AI system makes a predictive error that leads to wrongful arrest or civil liberties violations, determining accountability becomes complex. Clear lines of responsibility must be established—who is to blame: the algorithm developers, the law enforcement agency, or the policymakers who implemented the AI?\n", + "- **Transparency**: The opacity of many AI systems can hinder accountability. Stakeholders should demand transparency regarding how algorithms work and what data they use. This includes clear reporting on algorithmic decisions and their outcomes.\n", + "- **Human Oversight**: AI in predictive policing should not replace human judgment. Maintaining a system of human oversight can ensure that critical decisions, especially those impacting civil rights, are vetted through human interpretation and ethical considerations.\n", + "\n", + "### 3. **Societal Impact**\n", + "- **Civil Liberties**: The use of predictive policing can infringe on individuals' rights if it leads to excessive surveillance or profiling based on prediction rather than behavior. There's a fine line between preventive measures and civil rights violations.\n", + "- **Community Trust**: The deployment of AI in policing can affect community relationships with law enforcement. If communities perceive predictive policing as biased or unfair, it may lead to mistrust and decreased cooperation with law enforcement efforts.\n", + "- **Resource Allocation**: AI may skew resource allocation towards heavily policed communities, potentially exacerbating existing inequalities and diverting resources away from crime prevention and community investment in areas that may genuinely need them.\n", + "\n", + "### 4. **Ethical Frameworks**\n", + "- **Utilitarian Perspectives**: While predictive policing can potentially reduce crime detection and prevention, ethical evaluation must consider broader societal impacts, including the potential harm to affected communities.\n", + "- **Deontological Perspectives**: From a rights-based view, predictive policing must respect individual rights and freedoms, ensuring that law enforcement practices do not compromise the dignity and autonomy of individuals.\n", + "\n", + "### 5. **Public Engagement and Policy**\n", + "- **Community Consultation**: Engaging the community in discussions about the use of AI in policing can help bridge gaps and increase transparency. 
Public forums can provide a platform for feedback and concerns regarding predictive technologies.\n", + "- **Legislative Oversight**: Policymakers need to establish robust regulatory frameworks governing the use of predictive policing tools to safeguard civil liberties and ensure accountability and transparency throughout the process.\n", + "\n", + "### Conclusion\n", + "The ethical implications of using AI in predictive policing are complex and multifaceted. A careful, considerate approach that addresses bias, ensures accountability, and considers the broader societal impacts is critical for navigating the challenges posed by these technologies. Implementing safeguards and engaging with communities can help harness the benefits of AI while minimizing its harms.\n", + "\n", + "# Response from competitor 2\n", + "\n", + "# Ethics of AI in Predictive Policing\n", + "\n", + "Predictive policing using AI presents several significant ethical challenges:\n", + "\n", + "## Bias Concerns\n", + "- Historical police data often contains embedded biases against marginalized communities\n", + "- Algorithms may perpetuate or amplify these biases, creating harmful feedback loops\n", + "- Risk of technological laundering where human bias is hidden behind a veneer of algorithmic objectivity\n", + "\n", + "## Accountability Issues\n", + "- \"Black box\" algorithms create difficulties in understanding how predictions are generated\n", + "- Unclear responsibility chains between developers, police departments, and officers\n", + "- Questions about legal recourse for citizens wrongfully targeted\n", + "\n", + "## Societal Impact\n", + "- Potential erosion of presumption of innocence by targeting individuals based on statistical likelihood\n", + "- Risk of creating over-policed communities, reinforcing existing social inequalities\n", + "- Privacy implications of mass data collection and algorithmic surveillance\n", + "\n", + "Ethical implementation would require transparent algorithms, diverse training data, human oversight, regular auditing for bias, and community involvement in deployment decisions. The core question remains whether we can balance potential public safety benefits against risks to civil liberties and equal protection.\n", + "\n", + "# Response from competitor 3\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires a multi-faceted approach, considering not only the technology itself but also the complex social context in which it is deployed. Here's a breakdown of the key factors:\n", + "\n", + "**1. Bias:**\n", + "\n", + "* **Source Data Bias:** AI models are trained on historical data, which often reflects existing biases within the criminal justice system. If past policing practices disproportionately targeted certain communities (e.g., due to racial profiling, socioeconomic disparities), the AI will learn and perpetuate these biases. 
This can lead to:\n", + " * **Reinforcement of Existing Inequalities:** Predictive policing may reinforce discriminatory practices by disproportionately focusing resources on marginalized communities, leading to more arrests, and further skewing the data the AI is trained on, creating a self-fulfilling prophecy.\n", + " * **Bias Amplification:** AI algorithms can amplify subtle biases in the data that might be difficult for humans to detect, leading to even more discriminatory outcomes.\n", + " * **Example:** If a system is trained on arrest data showing higher drug crime rates in a specific neighborhood, it may predict higher crime rates in that neighborhood even if the underlying reason is simply increased police presence and enforcement in that area.\n", + "\n", + "* **Algorithmic Bias:** Even with seemingly unbiased data, bias can creep into the algorithm itself during the design and development phase. This can be due to:\n", + " * **Feature Selection:** Choosing specific variables to predict crime may inadvertently correlate with protected characteristics (e.g., race, ethnicity, socioeconomic status).\n", + " * **Model Design:** Certain algorithms might inherently be more prone to bias than others.\n", + " * **Thresholds and Cutoffs:** Setting thresholds for risk scores or predicted crime rates can have disproportionate impacts on different groups.\n", + "\n", + "* **Mitigation Strategies:**\n", + " * **Data Auditing and Cleaning:** Thoroughly examine and address biases in the training data. Consider oversampling underrepresented groups or using techniques to de-bias the data.\n", + " * **Algorithmic Auditing:** Regularly audit the algorithm's performance to identify and correct for bias. Use metrics beyond overall accuracy, focusing on fairness metrics (e.g., equal opportunity, predictive parity, calibration).\n", + " * **Transparency and Explainability:** Ensure that the AI's decision-making process is transparent and explainable to stakeholders, including law enforcement, policymakers, and the public. This allows for scrutiny and identification of potential biases.\n", + "\n", + "**2. Accountability:**\n", + "\n", + "* **Who is responsible when the AI makes a mistake?** Determining accountability is crucial. 
If an AI predicts a crime and leads to a wrongful arrest, who is held responsible: the software developer, the police department, the officer who acted on the prediction, or the AI itself (which is not a legal entity)?\n", + "* **Lack of Transparency:** \"Black box\" algorithms can make it difficult to understand how a prediction was made, making it challenging to hold anyone accountable for errors or biased outcomes.\n", + "* **Due Process Concerns:** Relying heavily on AI predictions can potentially undermine due process rights, as individuals may be targeted based on statistical probabilities rather than individual suspicion.\n", + "* **Mitigation Strategies:**\n", + " * **Clear Lines of Responsibility:** Establish clear lines of responsibility for the development, deployment, and use of predictive policing AI.\n", + " * **Human Oversight:** Implement robust human oversight mechanisms to ensure that AI predictions are not blindly followed but are carefully reviewed and validated by human officers.\n", + " * **Explainable AI (XAI):** Develop and deploy AI systems that provide explanations for their predictions, allowing for human review and scrutiny.\n", + " * **Independent Audits:** Conduct regular independent audits of predictive policing systems to assess their accuracy, fairness, and adherence to ethical guidelines.\n", + "\n", + "**3. Societal Impact:**\n", + "\n", + "* **Erosion of Trust:** If predictive policing systems are perceived as unfair or discriminatory, they can erode trust between law enforcement and the communities they serve.\n", + "* **Privacy Concerns:** Predictive policing systems often rely on the collection and analysis of large amounts of personal data, raising significant privacy concerns. The data used could include arrest records, social media activity, location data, and other sensitive information.\n", + "* **Chilling Effect on Civil Liberties:** The widespread use of predictive policing could have a chilling effect on civil liberties, as individuals may be less likely to engage in lawful activities if they fear being targeted by law enforcement based on AI predictions.\n", + "* **Displacement of Crime:** Predictive policing might simply displace crime to other areas, rather than addressing the root causes of crime.\n", + "* **Mitigation Strategies:**\n", + " * **Community Engagement:** Involve community members in the design, implementation, and oversight of predictive policing systems.\n", + " * **Data Minimization and Privacy Protection:** Collect and use only the data that is strictly necessary for predictive policing purposes and implement strong data security and privacy protections.\n", + " * **Transparency and Public Education:** Be transparent about how predictive policing systems work and how they are being used. Educate the public about the risks and benefits of these systems.\n", + " * **Focus on Root Causes of Crime:** Invest in programs that address the root causes of crime, such as poverty, inequality, and lack of opportunity. Predictive policing should not be seen as a substitute for addressing these underlying issues.\n", + "\n", + "**4. 
Alternatives and Trade-offs:**\n", + "\n", + "* **Consider non-AI solutions:** Before implementing AI-based predictive policing, explore alternative strategies, such as community policing, problem-oriented policing, and focused deterrence, which may be more effective and less ethically problematic.\n", + "* **Weigh the potential benefits against the risks:** Carefully weigh the potential benefits of predictive policing (e.g., crime reduction, improved resource allocation) against the risks of bias, accountability issues, and societal harm. Ensure that the benefits outweigh the risks.\n", + "\n", + "**Framework for Ethical Assessment:**\n", + "\n", + "A robust ethical assessment should involve the following steps:\n", + "\n", + "1. **Identify Stakeholders:** Determine who will be affected by the use of predictive policing (e.g., law enforcement, communities, individuals).\n", + "2. **Map Potential Harms and Benefits:** Identify the potential harms and benefits of the system for each stakeholder group.\n", + "3. **Evaluate Fairness and Equity:** Assess whether the system is fair and equitable to all stakeholders.\n", + "4. **Consider Privacy and Data Security:** Evaluate the system's privacy implications and data security measures.\n", + "5. **Determine Accountability Mechanisms:** Establish clear lines of responsibility and accountability for the system's performance.\n", + "6. **Engage Stakeholders in Dialogue:** Involve stakeholders in dialogue about the ethical implications of the system.\n", + "7. **Monitor and Evaluate:** Continuously monitor and evaluate the system's performance and ethical implications.\n", + "\n", + "In conclusion, using AI in predictive policing presents a complex web of ethical challenges. A responsible approach requires careful consideration of bias, accountability, and societal impact, along with proactive measures to mitigate risks and ensure fairness. Transparency, community engagement, and ongoing evaluation are essential to ensure that these systems are used in a way that promotes justice and protects civil liberties. Failure to do so risks perpetuating and amplifying existing inequalities within the criminal justice system.\n", + "\n", + "\n", + "# Response from competitor 4\n", + "\n", + "The use of artificial intelligence (AI) in **predictive policing** raises significant ethical concerns, particularly regarding **bias, accountability, and societal impact**. Below is a structured analysis of these implications:\n", + "\n", + "### **1. Bias in AI and Predictive Policing** \n", + "- **Data Bias**: Predictive policing relies on historical crime data, which may reflect **systemic biases** (e.g., over-policing in minority communities). If AI models are trained on biased data, they may perpetuate or even amplify discrimination. \n", + "- **Algorithmic Bias**: Machine learning models may reinforce **racial, socioeconomic, or geographic disparities** if not carefully audited. For example, facial recognition has been shown to misidentify people of color more frequently. \n", + "- **Feedback Loops**: If police are directed to patrol areas flagged by AI, they may record more crimes there, reinforcing the system’s bias in a self-fulfilling cycle. \n", + "\n", + "### **2. Accountability and Transparency** \n", + "- **Black Box Problem**: Many AI models (e.g., deep learning) are opaque, making it difficult to explain why certain predictions are made. This lack of transparency challenges **due process** and **legal accountability**. 
\n", + "- **Responsibility Gaps**: If an AI system leads to wrongful arrests or excessive policing, who is accountable—the developers, law enforcement, or policymakers? Clear **legal frameworks** are needed to assign liability. \n", + "- **Public Oversight**: Predictive policing tools are often proprietary, limiting public scrutiny. Ethical AI requires **auditability** and **community input** to prevent misuse. \n", + "\n", + "### **3. Societal Impact** \n", + "- **Erosion of Trust**: Over-reliance on AI may deepen distrust between law enforcement and marginalized communities, particularly if policing becomes more **automated and less human-judgment-based**. \n", + "- **Privacy Concerns**: Predictive policing often involves **mass surveillance** (e.g., facial recognition, social media monitoring), raising concerns about **civil liberties** and **government overreach**. \n", + "- **Reinforcement of Structural Inequities**: If AI disproportionately targets disadvantaged groups, it could worsen **social inequality** rather than reduce crime. \n", + "\n", + "### **Ethical Frameworks to Consider** \n", + "- **Fairness**: AI models should be rigorously tested for **disparate impact** and adjusted to minimize bias. \n", + "- **Transparency**: Policymakers should mandate **explainable AI** and public reporting on predictive policing outcomes. \n", + "- **Human Oversight**: AI should **assist**, not replace, human judgment in policing decisions. \n", + "- **Community Engagement**: Affected populations should have a say in whether and how predictive policing is deployed. \n", + "\n", + "### **Conclusion** \n", + "While AI in predictive policing has potential benefits (e.g., efficient resource allocation), its ethical risks—particularly bias, lack of accountability, and societal harm—demand **strict regulation, oversight, and continuous ethical evaluation**. A **human rights-centered approach** is essential to ensure AI serves justice rather than injustice. \n", + "\n", + "Would you like recommendations for mitigating these risks?\n", + "\n", + "# Response from competitor 5\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves considering multiple factors, including bias, accountability, and societal impact. Here's a comprehensive analysis of the ethical implications:\n", + "\n", + "**Bias:**\n", + "\n", + "1. **Data quality and selection:** AI algorithms rely on historical crime data, which may reflect existing biases in policing practices, such as racial profiling. If the data is biased, the AI system will learn and replicate these biases, leading to discriminatory policing.\n", + "2. **Algorithmic bias:** AI algorithms can perpetuate and amplify existing biases if they are not designed to account for them. For example, if an algorithm is trained on data that overrepresents certain demographic groups, it may be more likely to predict crime in those areas.\n", + "3. **Lack of transparency:** Complex AI algorithms can be difficult to interpret, making it challenging to identify and address biases.\n", + "\n", + "**Accountability:**\n", + "\n", + "1. **Lack of human oversight:** AI-driven predictive policing may lead to decisions being made without human oversight, reducing accountability and increasing the risk of errors or biases.\n", + "2. **Automated decision-making:** AI systems may make decisions based on complex algorithms, making it difficult to identify who is responsible for those decisions.\n", + "3. 
**Audit trails:** Maintaining audit trails and logs of AI-driven decisions is crucial to ensure accountability and transparency.\n", + "\n", + "**Societal Impact:**\n", + "\n", + "1. **Stigma and marginalization:** Predictive policing may lead to increased surveillance and targeting of specific communities, exacerbating existing social and economic disparities.\n", + "2. **Disproportionate impact on vulnerable groups:** AI-driven predictive policing may disproportionately affect vulnerable groups, such as low-income communities, racial minorities, or those with mental health issues.\n", + "3. **Community trust and legitimacy:** The use of AI in predictive policing may erode community trust in law enforcement, particularly if the technology is perceived as biased or unaccountable.\n", + "\n", + "**Additional Considerations:**\n", + "\n", + "1. **Transparency and explainability:** Ensuring that AI-driven predictive policing is transparent, explainable, and interpretable is crucial to build trust and accountability.\n", + "2. **Human rights and due process:** AI-driven predictive policing must be designed to respect human rights and due process, including the right to privacy, freedom from discrimination, and the right to a fair trial.\n", + "3. **Regulatory frameworks:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing is essential to ensure that the technology is used responsibly and ethically.\n", + "\n", + "**Mitigation Strategies:**\n", + "\n", + "1. **Data quality and validation:** Ensuring that data is accurate, complete, and unbiased is crucial to developing reliable AI systems.\n", + "2. **Algorithmic auditing and testing:** Regularly auditing and testing AI algorithms for bias and errors can help identify and address potential issues.\n", + "3. **Human oversight and review:** Implementing human oversight and review processes can help detect and correct errors or biases in AI-driven decisions.\n", + "4. **Community engagement and participation:** Engaging with communities and incorporating their concerns and feedback into the development and deployment of AI-driven predictive policing can help build trust and legitimacy.\n", + "5. **Regulatory frameworks and guidelines:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing can help ensure that the technology is used responsibly and ethically.\n", + "\n", + "In conclusion, analyzing the ethical implications of using AI in predictive policing requires careful consideration of factors such as bias, accountability, and societal impact. By acknowledging these challenges and implementing mitigation strategies, law enforcement agencies can ensure that AI-driven predictive policing is used responsibly and ethically, promoting fairness, transparency, and accountability.\n", + "\n", + "# Response from competitor 6\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires consideration of several key factors, including:\n", + "\n", + "1. **Bias**: AI algorithms can perpetuate existing biases in policing if they are trained on biased data sets. This can result in discriminatory practices, such as targeting certain communities or demographics.\n", + "2. **Accountability**: Predictive policing relies heavily on algorithms that may not be transparent or explainable. 
This lack of accountability raises concerns about the responsibility of policymakers and law enforcement officials for the decisions made by these systems.\n", + "3. **Societal impact**: The widespread use of predictive policing could exacerbate existing social inequalities, as it may lead to increased surveillance, harassment, and marginalization of already vulnerable populations.\n", + "\n", + "To address these ethical concerns, consider the following steps:\n", + "\n", + "1. **Data audits**: Conduct regular audits of data used to train AI algorithms, ensuring that they are diverse and representative of all communities.\n", + "2. **Algorithmic transparency**: Implement measures to provide transparent explanations for AI-driven decisions, enabling scrutiny and assessment.\n", + "3.\n", + "\n", + "\n" + ] + } + ], + "source": [ + "print(together)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "# Construct the judge prompt - asking for JSON only makes the verdict easy to parse\n", + "\n", + "judge = f\"\"\"You are judging a competition between {len(competitors)} competitors.\n", + "Each model has been given this question:\n", + "\n", + "{question}\n", + "\n", + "Your job is to evaluate each response for clarity and strength of argument, and rank them in order of best to worst.\n", + "Respond with JSON, and only JSON, with the following format:\n", + "{{\"results\": [\"best competitor number\", \"second best competitor number\", \"third best competitor number\", ...]}}\n", + "\n", + "Here are the responses from each competitor:\n", + "\n", + "{together}\n", + "\n", + "Now respond with the JSON with the ranked order of the competitors, nothing else. Do not include markdown formatting or code blocks.\"\"\"\n" + ] + },
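+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# One way you might run the judge and parse its verdict - just a sketch, reusing the openai client from earlier\n", + "# Any capable model would do here; gpt-4o-mini keeps it cheap. We print the full prompt below to inspect it first.\n", + "\n", + "import json\n", + "\n", + "judge_messages = [{\"role\": \"user\", \"content\": judge}]\n", + "\n", + "judge_response = openai.chat.completions.create(\n", + "    model=\"gpt-4o-mini\",\n", + "    messages=judge_messages\n", + ")\n", + "\n", + "# Because the prompt demands JSON and nothing else, we can parse the reply directly\n", + "ranking = json.loads(judge_response.choices[0].message.content)[\"results\"]\n", + "print(ranking)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are judging a competition between 6 competitors.\n", + "Each model has been given this question:\n", + "\n", + "How would you analyze the ethical implications of using artificial intelligence in predictive policing, considering factors such as bias, accountability, and societal impact?\n", + "\n", + "Your job is to evaluate each response for clarity and strength of argument, and rank them in order of best to worst.\n", + "Respond with JSON, and only JSON, with the following format:\n", + "{\"results\": [\"best competitor number\", \"second best competitor number\", \"third best competitor number\", ...]}\n", + "\n", + "Here are the responses from each competitor:\n", + "\n", + "# Response from competitor 1\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves a multidimensional approach, considering several key factors such as bias, accountability, and societal impact. Here is a structured framework for this analysis:\n", + "\n", + "### 1. **Bias**\n", + "- **Data Bias**: AI systems rely heavily on historical data, which can perpetuate existing biases present in that data. If the data reflects racial or socioeconomic disparities in policing (e.g., over-policing in certain communities), the AI can reinforce these biases by predicting higher crime rates in these areas, leading to a self-fulfilling prophecy.\n", + "- **Algorithmic Bias**: The design of algorithms may also introduce biases if the developers unconsciously embed their own biases into the model.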
It's crucial to examine who created the algorithms and what assumptions were made during their development.\n", + "- **Mitigation Strategies**: Implementing techniques like algorithmic fairness audits and diverse training datasets can help reduce bias. Regularly assessing and adjusting algorithms as new data comes in can also be important.\n", + "\n", + "### 2. **Accountability**\n", + "- **Responsibility for Decisions**: If an AI system makes a predictive error that leads to wrongful arrest or civil liberties violations, determining accountability becomes complex. Clear lines of responsibility must be established—who is to blame: the algorithm developers, the law enforcement agency, or the policymakers who implemented the AI?\n", + "- **Transparency**: The opacity of many AI systems can hinder accountability. Stakeholders should demand transparency regarding how algorithms work and what data they use. This includes clear reporting on algorithmic decisions and their outcomes.\n", + "- **Human Oversight**: AI in predictive policing should not replace human judgment. Maintaining a system of human oversight can ensure that critical decisions, especially those impacting civil rights, are vetted through human interpretation and ethical considerations.\n", + "\n", + "### 3. **Societal Impact**\n", + "- **Civil Liberties**: The use of predictive policing can infringe on individuals' rights if it leads to excessive surveillance or profiling based on prediction rather than behavior. There's a fine line between preventive measures and civil rights violations.\n", + "- **Community Trust**: The deployment of AI in policing can affect community relationships with law enforcement. If communities perceive predictive policing as biased or unfair, it may lead to mistrust and decreased cooperation with law enforcement efforts.\n", + "- **Resource Allocation**: AI may skew resource allocation towards heavily policed communities, potentially exacerbating existing inequalities and diverting resources away from crime prevention and community investment in areas that may genuinely need them.\n", + "\n", + "### 4. **Ethical Frameworks**\n", + "- **Utilitarian Perspectives**: While predictive policing can potentially reduce crime detection and prevention, ethical evaluation must consider broader societal impacts, including the potential harm to affected communities.\n", + "- **Deontological Perspectives**: From a rights-based view, predictive policing must respect individual rights and freedoms, ensuring that law enforcement practices do not compromise the dignity and autonomy of individuals.\n", + "\n", + "### 5. **Public Engagement and Policy**\n", + "- **Community Consultation**: Engaging the community in discussions about the use of AI in policing can help bridge gaps and increase transparency. Public forums can provide a platform for feedback and concerns regarding predictive technologies.\n", + "- **Legislative Oversight**: Policymakers need to establish robust regulatory frameworks governing the use of predictive policing tools to safeguard civil liberties and ensure accountability and transparency throughout the process.\n", + "\n", + "### Conclusion\n", + "The ethical implications of using AI in predictive policing are complex and multifaceted. A careful, considerate approach that addresses bias, ensures accountability, and considers the broader societal impacts is critical for navigating the challenges posed by these technologies. 
Implementing safeguards and engaging with communities can help harness the benefits of AI while minimizing its harms.\n", + "\n", + "# Response from competitor 2\n", + "\n", + "# Ethics of AI in Predictive Policing\n", + "\n", + "Predictive policing using AI presents several significant ethical challenges:\n", + "\n", + "## Bias Concerns\n", + "- Historical police data often contains embedded biases against marginalized communities\n", + "- Algorithms may perpetuate or amplify these biases, creating harmful feedback loops\n", + "- Risk of technological laundering where human bias is hidden behind a veneer of algorithmic objectivity\n", + "\n", + "## Accountability Issues\n", + "- \"Black box\" algorithms create difficulties in understanding how predictions are generated\n", + "- Unclear responsibility chains between developers, police departments, and officers\n", + "- Questions about legal recourse for citizens wrongfully targeted\n", + "\n", + "## Societal Impact\n", + "- Potential erosion of presumption of innocence by targeting individuals based on statistical likelihood\n", + "- Risk of creating over-policed communities, reinforcing existing social inequalities\n", + "- Privacy implications of mass data collection and algorithmic surveillance\n", + "\n", + "Ethical implementation would require transparent algorithms, diverse training data, human oversight, regular auditing for bias, and community involvement in deployment decisions. The core question remains whether we can balance potential public safety benefits against risks to civil liberties and equal protection.\n", + "\n", + "# Response from competitor 3\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires a multi-faceted approach, considering not only the technology itself but also the complex social context in which it is deployed. Here's a breakdown of the key factors:\n", + "\n", + "**1. Bias:**\n", + "\n", + "* **Source Data Bias:** AI models are trained on historical data, which often reflects existing biases within the criminal justice system. If past policing practices disproportionately targeted certain communities (e.g., due to racial profiling, socioeconomic disparities), the AI will learn and perpetuate these biases. This can lead to:\n", + " * **Reinforcement of Existing Inequalities:** Predictive policing may reinforce discriminatory practices by disproportionately focusing resources on marginalized communities, leading to more arrests, and further skewing the data the AI is trained on, creating a self-fulfilling prophecy.\n", + " * **Bias Amplification:** AI algorithms can amplify subtle biases in the data that might be difficult for humans to detect, leading to even more discriminatory outcomes.\n", + " * **Example:** If a system is trained on arrest data showing higher drug crime rates in a specific neighborhood, it may predict higher crime rates in that neighborhood even if the underlying reason is simply increased police presence and enforcement in that area.\n", + "\n", + "* **Algorithmic Bias:** Even with seemingly unbiased data, bias can creep into the algorithm itself during the design and development phase. 
This can be due to:\n", + " * **Feature Selection:** Choosing specific variables to predict crime may inadvertently correlate with protected characteristics (e.g., race, ethnicity, socioeconomic status).\n", + " * **Model Design:** Certain algorithms might inherently be more prone to bias than others.\n", + " * **Thresholds and Cutoffs:** Setting thresholds for risk scores or predicted crime rates can have disproportionate impacts on different groups.\n", + "\n", + "* **Mitigation Strategies:**\n", + " * **Data Auditing and Cleaning:** Thoroughly examine and address biases in the training data. Consider oversampling underrepresented groups or using techniques to de-bias the data.\n", + " * **Algorithmic Auditing:** Regularly audit the algorithm's performance to identify and correct for bias. Use metrics beyond overall accuracy, focusing on fairness metrics (e.g., equal opportunity, predictive parity, calibration).\n", + " * **Transparency and Explainability:** Ensure that the AI's decision-making process is transparent and explainable to stakeholders, including law enforcement, policymakers, and the public. This allows for scrutiny and identification of potential biases.\n", + "\n", + "**2. Accountability:**\n", + "\n", + "* **Who is responsible when the AI makes a mistake?** Determining accountability is crucial. If an AI predicts a crime and leads to a wrongful arrest, who is held responsible: the software developer, the police department, the officer who acted on the prediction, or the AI itself (which is not a legal entity)?\n", + "* **Lack of Transparency:** \"Black box\" algorithms can make it difficult to understand how a prediction was made, making it challenging to hold anyone accountable for errors or biased outcomes.\n", + "* **Due Process Concerns:** Relying heavily on AI predictions can potentially undermine due process rights, as individuals may be targeted based on statistical probabilities rather than individual suspicion.\n", + "* **Mitigation Strategies:**\n", + " * **Clear Lines of Responsibility:** Establish clear lines of responsibility for the development, deployment, and use of predictive policing AI.\n", + " * **Human Oversight:** Implement robust human oversight mechanisms to ensure that AI predictions are not blindly followed but are carefully reviewed and validated by human officers.\n", + " * **Explainable AI (XAI):** Develop and deploy AI systems that provide explanations for their predictions, allowing for human review and scrutiny.\n", + " * **Independent Audits:** Conduct regular independent audits of predictive policing systems to assess their accuracy, fairness, and adherence to ethical guidelines.\n", + "\n", + "**3. Societal Impact:**\n", + "\n", + "* **Erosion of Trust:** If predictive policing systems are perceived as unfair or discriminatory, they can erode trust between law enforcement and the communities they serve.\n", + "* **Privacy Concerns:** Predictive policing systems often rely on the collection and analysis of large amounts of personal data, raising significant privacy concerns. 
The data used could include arrest records, social media activity, location data, and other sensitive information.\n", + "* **Chilling Effect on Civil Liberties:** The widespread use of predictive policing could have a chilling effect on civil liberties, as individuals may be less likely to engage in lawful activities if they fear being targeted by law enforcement based on AI predictions.\n", + "* **Displacement of Crime:** Predictive policing might simply displace crime to other areas, rather than addressing the root causes of crime.\n", + "* **Mitigation Strategies:**\n", + " * **Community Engagement:** Involve community members in the design, implementation, and oversight of predictive policing systems.\n", + " * **Data Minimization and Privacy Protection:** Collect and use only the data that is strictly necessary for predictive policing purposes and implement strong data security and privacy protections.\n", + " * **Transparency and Public Education:** Be transparent about how predictive policing systems work and how they are being used. Educate the public about the risks and benefits of these systems.\n", + " * **Focus on Root Causes of Crime:** Invest in programs that address the root causes of crime, such as poverty, inequality, and lack of opportunity. Predictive policing should not be seen as a substitute for addressing these underlying issues.\n", + "\n", + "**4. Alternatives and Trade-offs:**\n", + "\n", + "* **Consider non-AI solutions:** Before implementing AI-based predictive policing, explore alternative strategies, such as community policing, problem-oriented policing, and focused deterrence, which may be more effective and less ethically problematic.\n", + "* **Weigh the potential benefits against the risks:** Carefully weigh the potential benefits of predictive policing (e.g., crime reduction, improved resource allocation) against the risks of bias, accountability issues, and societal harm. Ensure that the benefits outweigh the risks.\n", + "\n", + "**Framework for Ethical Assessment:**\n", + "\n", + "A robust ethical assessment should involve the following steps:\n", + "\n", + "1. **Identify Stakeholders:** Determine who will be affected by the use of predictive policing (e.g., law enforcement, communities, individuals).\n", + "2. **Map Potential Harms and Benefits:** Identify the potential harms and benefits of the system for each stakeholder group.\n", + "3. **Evaluate Fairness and Equity:** Assess whether the system is fair and equitable to all stakeholders.\n", + "4. **Consider Privacy and Data Security:** Evaluate the system's privacy implications and data security measures.\n", + "5. **Determine Accountability Mechanisms:** Establish clear lines of responsibility and accountability for the system's performance.\n", + "6. **Engage Stakeholders in Dialogue:** Involve stakeholders in dialogue about the ethical implications of the system.\n", + "7. **Monitor and Evaluate:** Continuously monitor and evaluate the system's performance and ethical implications.\n", + "\n", + "In conclusion, using AI in predictive policing presents a complex web of ethical challenges. A responsible approach requires careful consideration of bias, accountability, and societal impact, along with proactive measures to mitigate risks and ensure fairness. Transparency, community engagement, and ongoing evaluation are essential to ensure that these systems are used in a way that promotes justice and protects civil liberties. 
Failure to do so risks perpetuating and amplifying existing inequalities within the criminal justice system.\n", + "\n", + "\n", + "# Response from competitor 4\n", + "\n", + "The use of artificial intelligence (AI) in **predictive policing** raises significant ethical concerns, particularly regarding **bias, accountability, and societal impact**. Below is a structured analysis of these implications:\n", + "\n", + "### **1. Bias in AI and Predictive Policing** \n", + "- **Data Bias**: Predictive policing relies on historical crime data, which may reflect **systemic biases** (e.g., over-policing in minority communities). If AI models are trained on biased data, they may perpetuate or even amplify discrimination. \n", + "- **Algorithmic Bias**: Machine learning models may reinforce **racial, socioeconomic, or geographic disparities** if not carefully audited. For example, facial recognition has been shown to misidentify people of color more frequently. \n", + "- **Feedback Loops**: If police are directed to patrol areas flagged by AI, they may record more crimes there, reinforcing the system’s bias in a self-fulfilling cycle. \n", + "\n", + "### **2. Accountability and Transparency** \n", + "- **Black Box Problem**: Many AI models (e.g., deep learning) are opaque, making it difficult to explain why certain predictions are made. This lack of transparency challenges **due process** and **legal accountability**. \n", + "- **Responsibility Gaps**: If an AI system leads to wrongful arrests or excessive policing, who is accountable—the developers, law enforcement, or policymakers? Clear **legal frameworks** are needed to assign liability. \n", + "- **Public Oversight**: Predictive policing tools are often proprietary, limiting public scrutiny. Ethical AI requires **auditability** and **community input** to prevent misuse. \n", + "\n", + "### **3. Societal Impact** \n", + "- **Erosion of Trust**: Over-reliance on AI may deepen distrust between law enforcement and marginalized communities, particularly if policing becomes more **automated and less human-judgment-based**. \n", + "- **Privacy Concerns**: Predictive policing often involves **mass surveillance** (e.g., facial recognition, social media monitoring), raising concerns about **civil liberties** and **government overreach**. \n", + "- **Reinforcement of Structural Inequities**: If AI disproportionately targets disadvantaged groups, it could worsen **social inequality** rather than reduce crime. \n", + "\n", + "### **Ethical Frameworks to Consider** \n", + "- **Fairness**: AI models should be rigorously tested for **disparate impact** and adjusted to minimize bias. \n", + "- **Transparency**: Policymakers should mandate **explainable AI** and public reporting on predictive policing outcomes. \n", + "- **Human Oversight**: AI should **assist**, not replace, human judgment in policing decisions. \n", + "- **Community Engagement**: Affected populations should have a say in whether and how predictive policing is deployed. \n", + "\n", + "### **Conclusion** \n", + "While AI in predictive policing has potential benefits (e.g., efficient resource allocation), its ethical risks—particularly bias, lack of accountability, and societal harm—demand **strict regulation, oversight, and continuous ethical evaluation**. A **human rights-centered approach** is essential to ensure AI serves justice rather than injustice. 
\n", + "\n", + "Would you like recommendations for mitigating these risks?\n", + "\n", + "# Response from competitor 5\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing involves considering multiple factors, including bias, accountability, and societal impact. Here's a comprehensive analysis of the ethical implications:\n", + "\n", + "**Bias:**\n", + "\n", + "1. **Data quality and selection:** AI algorithms rely on historical crime data, which may reflect existing biases in policing practices, such as racial profiling. If the data is biased, the AI system will learn and replicate these biases, leading to discriminatory policing.\n", + "2. **Algorithmic bias:** AI algorithms can perpetuate and amplify existing biases if they are not designed to account for them. For example, if an algorithm is trained on data that overrepresents certain demographic groups, it may be more likely to predict crime in those areas.\n", + "3. **Lack of transparency:** Complex AI algorithms can be difficult to interpret, making it challenging to identify and address biases.\n", + "\n", + "**Accountability:**\n", + "\n", + "1. **Lack of human oversight:** AI-driven predictive policing may lead to decisions being made without human oversight, reducing accountability and increasing the risk of errors or biases.\n", + "2. **Automated decision-making:** AI systems may make decisions based on complex algorithms, making it difficult to identify who is responsible for those decisions.\n", + "3. **Audit trails:** Maintaining audit trails and logs of AI-driven decisions is crucial to ensure accountability and transparency.\n", + "\n", + "**Societal Impact:**\n", + "\n", + "1. **Stigma and marginalization:** Predictive policing may lead to increased surveillance and targeting of specific communities, exacerbating existing social and economic disparities.\n", + "2. **Disproportionate impact on vulnerable groups:** AI-driven predictive policing may disproportionately affect vulnerable groups, such as low-income communities, racial minorities, or those with mental health issues.\n", + "3. **Community trust and legitimacy:** The use of AI in predictive policing may erode community trust in law enforcement, particularly if the technology is perceived as biased or unaccountable.\n", + "\n", + "**Additional Considerations:**\n", + "\n", + "1. **Transparency and explainability:** Ensuring that AI-driven predictive policing is transparent, explainable, and interpretable is crucial to build trust and accountability.\n", + "2. **Human rights and due process:** AI-driven predictive policing must be designed to respect human rights and due process, including the right to privacy, freedom from discrimination, and the right to a fair trial.\n", + "3. **Regulatory frameworks:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing is essential to ensure that the technology is used responsibly and ethically.\n", + "\n", + "**Mitigation Strategies:**\n", + "\n", + "1. **Data quality and validation:** Ensuring that data is accurate, complete, and unbiased is crucial to developing reliable AI systems.\n", + "2. **Algorithmic auditing and testing:** Regularly auditing and testing AI algorithms for bias and errors can help identify and address potential issues.\n", + "3. **Human oversight and review:** Implementing human oversight and review processes can help detect and correct errors or biases in AI-driven decisions.\n", + "4. 
**Community engagement and participation:** Engaging with communities and incorporating their concerns and feedback into the development and deployment of AI-driven predictive policing can help build trust and legitimacy.\n", + "5. **Regulatory frameworks and guidelines:** Establishing regulatory frameworks and guidelines for the use of AI in predictive policing can help ensure that the technology is used responsibly and ethically.\n", + "\n", + "In conclusion, analyzing the ethical implications of using AI in predictive policing requires careful consideration of factors such as bias, accountability, and societal impact. By acknowledging these challenges and implementing mitigation strategies, law enforcement agencies can ensure that AI-driven predictive policing is used responsibly and ethically, promoting fairness, transparency, and accountability.\n", + "\n", + "# Response from competitor 6\n", + "\n", + "Analyzing the ethical implications of using artificial intelligence (AI) in predictive policing requires consideration of several key factors, including:\n", + "\n", + "1. **Bias**: AI algorithms can perpetuate existing biases in policing if they are trained on biased data sets. This can result in discriminatory practices, such as targeting certain communities or demographics.\n", + "2. **Accountability**: Predictive policing relies heavily on algorithms that may not be transparent or explainable. This lack of accountability raises concerns about the responsibility of policymakers and law enforcement officials for the decisions made by these systems.\n", + "3. **Societal impact**: The widespread use of predictive policing could exacerbate existing social inequalities, as it may lead to increased surveillance, harassment, and marginalization of already vulnerable populations.\n", + "\n", + "To address these ethical concerns, consider the following steps:\n", + "\n", + "1. **Data audits**: Conduct regular audits of data used to train AI algorithms, ensuring that they are diverse and representative of all communities.\n", + "2. **Algorithmic transparency**: Implement measures to provide transparent explanations for AI-driven decisions, enabling scrutiny and assessment.\n", + "3.\n", + "\n", + "\n", + "\n", + "Now respond with the JSON with the ranked order of the competitors, nothing else. 
Do not include markdown formatting or code blocks.\n" + ] + } + ], + "source": [ + "print(judge)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "judge_messages = [{\"role\": \"user\", \"content\": judge}]" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\"results\": [\"3\", \"1\", \"5\", \"4\", \"2\", \"6\"]}\n" + ] + } + ], + "source": [ + "# Judgement time!\n", + "\n", + "openai = OpenAI()\n", + "response = openai.chat.completions.create(\n", + " model=\"o3-mini\",\n", + " messages=judge_messages,\n", + ")\n", + "results = response.choices[0].message.content\n", + "print(results)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Rank 1: gemini-2.0-flash\n", + "Rank 2: gpt-4o-mini\n", + "Rank 3: llama-3.3-70b-versatile\n", + "Rank 4: deepseek-chat\n", + "Rank 5: claude-3-7-sonnet-latest\n", + "Rank 6: llama3.2\n" + ] + } + ], + "source": [ + "# OK let's turn this into results!\n", + "\n", + "results_dict = json.loads(results)\n", + "ranks = results_dict[\"results\"]\n", + "for index, result in enumerate(ranks):\n", + " competitor = competitors[int(result)-1]\n", + " print(f\"Rank {index+1}: {competitor}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Exercise

\n", + " Which pattern(s) did this use? Try updating this to add another Agentic design pattern.\n", + " \n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Commercial implications

\n", + "            These kinds of patterns - to send a task to multiple models, and evaluate results,\n", + "            are common where you need to improve the quality of your LLM response. This approach can be universally applied\n", + "            to business projects where accuracy is critical.\n", + "            </span>\n", + "        </td>\n", + "    
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/3_lab3.ipynb b/3_lab3.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..25d95575a7fee4fbd296a1b12e831bb2cfd3ab0c --- /dev/null +++ b/3_lab3.ipynb @@ -0,0 +1,638 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Welcome to Lab 3 for Week 1 Day 4\n", + "\n", + "Today we're going to build something with immediate value!\n", + "\n", + "In the folder `me` I've put a single file `linkedin.pdf` - it's a PDF download of my LinkedIn profile.\n", + "\n", + "Please replace it with yours!\n", + "\n", + "I've also made a file called `summary.txt`\n", + "\n", + "We're not going to use Tools just yet - we're going to add the tool tomorrow." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Looking up packages

\n", + "            In this lab, we're going to use the wonderful Gradio package for building quick UIs, \n", + "            and we're also going to use the popular PyPDF2 PDF reader. You can get guides to these packages by asking \n", + "            ChatGPT or Claude, and you can find all open-source packages on the repository https://pypi.org.\n", + "            </span>\n", + "        </td>\n", + "    
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# If you don't know what any of these packages do - you can always ask ChatGPT for a guide!\n", + "\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from PyPDF2 import PdfReader\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "reader = PdfReader(\"me/linkedin.pdf\")\n", + "linkedin = \"\"\n", + "for page in reader.pages:\n", + " text = page.extract_text()\n", + " if text:\n", + " linkedin += text" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "   \n", + "Contact\n", + "ed.donner@gmail.com\n", + "www.linkedin.com/in/eddonner\n", + "(LinkedIn)\n", + "edwarddonner.com (Personal)\n", + "Top Skills\n", + "CTO\n", + "Large Language Models (LLM)\n", + "PyTorch\n", + "Patents\n", + "Apparatus for determining role\n", + "fitness while eliminating unwanted\n", + "biasEd Donner\n", + "Co-Founder & CTO at Nebula.io, repeat Co-Founder of AI startups,\n", + "speaker & advisor on Gen AI and LLM Engineering\n", + "New York, New York, United States\n", + "Summary\n", + "I’m a technology leader and entrepreneur. I'm applying AI to a field\n", + "where it can make a massive impact: helping people discover their\n", + "potential and pursue their reason for being. But at my core, I’m a\n", + "software engineer and a scientist. I learned how to code aged 8 and\n", + "still spend weekends experimenting with Large Language Models\n", + "and writing code (rather badly). If you’d like to join us to show me\n", + "how it’s done.. message me!\n", + "As a work-hobby, I absolutely love giving talks about Gen AI and\n", + "LLMs. I'm the author of a best-selling, top-rated Udemy course\n", + "on LLM Engineering, and I speak at O'Reilly Live Events and\n", + "ODSC workshops. It brings me great joy to help others unlock the\n", + "astonishing power of LLMs.\n", + "I spent most of my career at JPMorgan building software for financial\n", + "markets. I worked in London, Tokyo and New York. I became an MD\n", + "running a global organization of 300. Then I left to start my own AI\n", + "business, untapt, to solve the problem that had plagued me at JPM -\n", + "why is so hard to hire engineers?\n", + "At untapt we worked with GQR, one of the world's fastest growing\n", + "recruitment firms. We collaborated on a patented invention in AI\n", + "and talent. Our skills were perfectly complementary - AI leaders vs\n", + "recruitment leaders - so much so, that we decided to join forces. In\n", + "2020, untapt was acquired by GQR’s parent company and Nebula\n", + "was born.\n", + "I’m now Co-Founder and CTO for Nebula, responsible for software\n", + "engineering and data science. Our stack is Python/Flask, React,\n", + "Mongo, ElasticSearch, with Kubernetes on GCP. Our 'secret sauce'\n", + "is our use of Gen AI and proprietary LLMs. If any of this sounds\n", + "interesting - we should talk!\n", + "  Page 1 of 5   \n", + "Experience\n", + "Nebula.io\n", + "Co-Founder & CTO\n", + "June 2021 - Present  (3 years 10 months)\n", + "New York, New York, United States\n", + "I’m the co-founder and CTO of Nebula.io. 
We help recruiters source,\n", + "understand, engage and manage talent, using Generative AI / proprietary\n", + "LLMs. Our patented model matches people with roles with greater accuracy\n", + "and speed than previously imaginable — no keywords required.\n", + "Our long term goal is to help people discover their potential and pursue their\n", + "reason for being, motivated by a concept called Ikigai. We help people find\n", + "roles where they will be most fulfilled and successful; as a result, we will raise\n", + "the level of human prosperity. It sounds grandiose, but since 77% of people\n", + "don’t consider themselves inspired or engaged at work, it’s completely within\n", + "our reach.\n", + "Simplified.Travel\n", + "AI Advisor\n", + "February 2025 - Present  (2 months)\n", + "Simplified Travel is empowering destinations to deliver unforgettable, data-\n", + "driven journeys at scale.\n", + "I'm giving AI advice to enable highly personalized itinerary solutions for DMOs,\n", + "hotels and tourism organizations, enhancing traveler experiences.\n", + "GQR Global Markets\n", + "Chief Technology Officer\n", + "January 2020 - Present  (5 years 3 months)\n", + "New York, New York, United States\n", + "As CTO of parent company Wynden Stark, I'm also responsible for innovation\n", + "initiatives at GQR.\n", + "Wynden Stark\n", + "Chief Technology Officer\n", + "January 2020 - Present  (5 years 3 months)\n", + "New York, New York, United States\n", + "With the acquisition of untapt, I transitioned to Chief Technology Officer for the\n", + "Wynden Stark Group, responsible for Data Science and Engineering.\n", + "  Page 2 of 5   \n", + "untapt\n", + "6 years 4 months\n", + "Founder, CTO\n", + "May 2019 - January 2020  (9 months)\n", + "Greater New York City Area\n", + "I founded untapt in October 2013; emerged from stealth in 2014 and went\n", + "into production with first product in 2015. In May 2019, I handed over CEO\n", + "responsibilities to Gareth Moody, previously the Chief Revenue Officer, shifting\n", + "my focus to the technology and product.\n", + "Our core invention is an Artificial Neural Network that uses Deep Learning /\n", + "NLP to understand the fit between candidates and roles.\n", + "Our SaaS products are used in the Recruitment Industry to connect people\n", + "with jobs in a highly scalable way. Our products are also used by Corporations\n", + "for internal and external hiring at high volume. We have strong SaaS metrics\n", + "and trends, and a growing number of bellwether clients.\n", + "Our Deep Learning / NLP models are developed in Python using Google\n", + "TensorFlow. Our tech stack is React / Redux and Angular HTML5 front-end\n", + "with Python / Flask back-end and MongoDB database. We are deployed on\n", + "the Google Cloud Platform using Kubernetes container orchestration.\n", + "Interview at NASDAQ: https://www.pscp.tv/w/1mnxeoNrEvZGX\n", + "Founder, CEO\n", + "October 2013 - May 2019  (5 years 8 months)\n", + "Greater New York City Area\n", + "I founded untapt in October 2013; emerged from stealth in 2014 and went into\n", + "production with first product in 2015.\n", + "Our core invention is an Artificial Neural Network that uses Deep Learning /\n", + "NLP to understand the fit between candidates and roles.\n", + "Our SaaS products are used in the Recruitment Industry to connect people\n", + "with jobs in a highly scalable way. Our products are also used by Corporations\n", + "for internal and external hiring at high volume. 
We have strong SaaS metrics\n", + "and trends, and a growing number of bellwether clients.\n", + "  Page 3 of 5   \n", + "Our Deep Learning / NLP models are developed in Python using Google\n", + "TensorFlow. Our tech stack is React / Redux and Angular HTML5 front-end\n", + "with Python / Flask back-end and MongoDB database. We are deployed on\n", + "the Google Cloud Platform using Kubernetes container orchestration.\n", + "-- Graduate of FinTech Innovation Lab\n", + "-- American Banker Top 20 Company To Watch\n", + "-- Voted AWS startup most likely to grow exponentially\n", + "-- Forbes contributor\n", + "More at https://www.untapt.com\n", + "Interview at NASDAQ: https://www.pscp.tv/w/1mnxeoNrEvZGX\n", + "In Fast Company: https://www.fastcompany.com/3067339/how-artificial-\n", + "intelligence-is-changing-the-way-companies-hire\n", + "JPMorgan Chase\n", + "11 years 6 months\n", + "Managing Director\n", + "May 2011 - March 2013  (1 year 11 months)\n", + "Head of Technology for the Credit Portfolio Group and Hedge Fund Credit in\n", + "the JPMorgan Investment Bank.\n", + "Led a team of 300 Java and Python software developers across NY, Houston,\n", + "London, Glasgow and India. Responsible for counterparty exposure, CVA\n", + "and risk management platforms, including simulation engines in Python that\n", + "calculate counterparty credit risk for the firm's Derivatives portfolio.\n", + "Managed the electronic trading limits initiative, and the Credit Stress program\n", + "which calculates risk information under stressed conditions. Jointly responsible\n", + "for Market Data and batch infrastructure across Risk.\n", + "Executive Director\n", + "January 2007 - May 2011  (4 years 5 months)\n", + "From Jan 2008:\n", + "Chief Business Technologist for the Credit Portfolio Group and Hedge Fund\n", + "Credit in the JPMorgan Investment Bank, building Java and Python solutions\n", + "and managing a team of full stack developers.\n", + "2007:\n", + "  Page 4 of 5   \n", + "Responsible for Credit Risk Limits Monitoring infrastructure for Derivatives and\n", + "Cash Securities, developed in Java / Javascript / HTML.\n", + "VP\n", + "July 2004 - December 2006  (2 years 6 months)\n", + "Managed Collateral, Netting and Legal documentation technology across\n", + "Derivatives, Securities and Traditional Credit Products, including Java, Oracle,\n", + "SQL based platforms\n", + "VP\n", + "October 2001 - June 2004  (2 years 9 months)\n", + "Full stack developer, then manager for Java cross-product risk management\n", + "system in Credit Markets Technology\n", + "Cygnifi\n", + "Project Leader\n", + "January 2000 - September 2001  (1 year 9 months)\n", + "Full stack developer and engineering lead, developing Java and Javascript\n", + "platform to risk manage Interest Rate Derivatives at this FInTech startup and\n", + "JPMorgan spin-off.\n", + "JPMorgan\n", + "Associate\n", + "July 1997 - December 1999  (2 years 6 months)\n", + "Full stack developer for Exotic and Flow Interest Rate Derivatives risk\n", + "management system in London, New York and Tokyo\n", + "IBM\n", + "Software Developer\n", + "August 1995 - June 1997  (1 year 11 months)\n", + "Java and Smalltalk developer with IBM Global Services; taught IBM classes on\n", + "Smalltalk and Object Technology in the UK and around Europe\n", + "Education\n", + "University of Oxford\n", + "Physics   · (1992 - 1995)\n", + "  Page 5 of 5\n" + ] + } + ], + "source": [ + "print(linkedin)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + 
"outputs": [], + "source": [ + "with open(\"me/summary.txt\", \"r\", encoding=\"utf-8\") as f:\n", + " summary = f.read()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "name = \"Ed Donner\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n", + "particularly questions related to {name}'s career, background, skills and experience. \\\n", + "Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n", + "You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \\\n", + "Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n", + "If you don't know the answer, say so.\"\n", + "\n", + "system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n", + "system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"You are acting as Ed Donner. You are answering questions on Ed Donner's website, particularly questions related to Ed Donner's career, background, skills and experience. Your responsibility is to represent Ed Donner for interactions on the website as faithfully as possible. You are given a summary of Ed Donner's background and LinkedIn profile which you can use to answer questions. Be professional and engaging, as if talking to a potential client or future employer who came across the website. If you don't know the answer, say so.\\n\\n## Summary:\\nMy name is Ed Donner. I'm an entrepreneur, software engineer and data scientist. I'm originally from London, England, but I moved to NYC in 2000.\\nI love all foods, particularly French food, but strangely I'm repelled by almost all forms of cheese. I'm not allergic, I just hate the taste! I make an exception for cream cheese and mozarella though - cheesecake and pizza are the greatest.\\n\\n## LinkedIn Profile:\\n\\xa0 \\xa0\\nContact\\ned.donner@gmail.com\\nwww.linkedin.com/in/eddonner\\n(LinkedIn)\\nedwarddonner.com (Personal)\\nTop Skills\\nCTO\\nLarge Language Models (LLM)\\nPyTorch\\nPatents\\nApparatus for determining role\\nfitness while eliminating unwanted\\nbiasEd Donner\\nCo-Founder & CTO at Nebula.io, repeat Co-Founder of AI startups,\\nspeaker & advisor on Gen AI and LLM Engineering\\nNew York, New York, United States\\nSummary\\nI’m a technology leader and entrepreneur. I'm applying AI to a field\\nwhere it can make a massive impact: helping people discover their\\npotential and pursue their reason for being. But at my core, I’m a\\nsoftware engineer and a scientist. I learned how to code aged 8 and\\nstill spend weekends experimenting with Large Language Models\\nand writing code (rather badly). If you’d like to join us to show me\\nhow it’s done.. message me!\\nAs a work-hobby, I absolutely love giving talks about Gen AI and\\nLLMs. I'm the author of a best-selling, top-rated Udemy course\\non LLM Engineering, and I speak at O'Reilly Live Events and\\nODSC workshops. It brings me great joy to help others unlock the\\nastonishing power of LLMs.\\nI spent most of my career at JPMorgan building software for financial\\nmarkets. 
I worked in London, Tokyo and New York. I became an MD\\nrunning a global organization of 300. Then I left to start my own AI\\nbusiness, untapt, to solve the problem that had plagued me at JPM -\\nwhy is so hard to hire engineers?\\nAt untapt we worked with GQR, one of the world's fastest growing\\nrecruitment firms. We collaborated on a patented invention in AI\\nand talent. Our skills were perfectly complementary - AI leaders vs\\nrecruitment leaders - so much so, that we decided to join forces. In\\n2020, untapt was acquired by GQR’s parent company and Nebula\\nwas born.\\nI’m now Co-Founder and CTO for Nebula, responsible for software\\nengineering and data science. Our stack is Python/Flask, React,\\nMongo, ElasticSearch, with Kubernetes on GCP. Our 'secret sauce'\\nis our use of Gen AI and proprietary LLMs. If any of this sounds\\ninteresting - we should talk!\\n\\xa0 Page 1 of 5\\xa0 \\xa0\\nExperience\\nNebula.io\\nCo-Founder & CTO\\nJune 2021\\xa0-\\xa0Present\\xa0 (3 years 10 months)\\nNew York, New York, United States\\nI’m the co-founder and CTO of Nebula.io. We help recruiters source,\\nunderstand, engage and manage talent, using Generative AI / proprietary\\nLLMs. Our patented model matches people with roles with greater accuracy\\nand speed than previously imaginable — no keywords required.\\nOur long term goal is to help people discover their potential and pursue their\\nreason for being, motivated by a concept called Ikigai. We help people find\\nroles where they will be most fulfilled and successful; as a result, we will raise\\nthe level of human prosperity. It sounds grandiose, but since 77% of people\\ndon’t consider themselves inspired or engaged at work, it’s completely within\\nour reach.\\nSimplified.Travel\\nAI Advisor\\nFebruary 2025\\xa0-\\xa0Present\\xa0 (2 months)\\nSimplified Travel is empowering destinations to deliver unforgettable, data-\\ndriven journeys at scale.\\nI'm giving AI advice to enable highly personalized itinerary solutions for DMOs,\\nhotels and tourism organizations, enhancing traveler experiences.\\nGQR Global Markets\\nChief Technology Officer\\nJanuary 2020\\xa0-\\xa0Present\\xa0 (5 years 3 months)\\nNew York, New York, United States\\nAs CTO of parent company Wynden Stark, I'm also responsible for innovation\\ninitiatives at GQR.\\nWynden Stark\\nChief Technology Officer\\nJanuary 2020\\xa0-\\xa0Present\\xa0 (5 years 3 months)\\nNew York, New York, United States\\nWith the acquisition of untapt, I transitioned to Chief Technology Officer for the\\nWynden Stark Group, responsible for Data Science and Engineering.\\n\\xa0 Page 2 of 5\\xa0 \\xa0\\nuntapt\\n6 years 4 months\\nFounder, CTO\\nMay 2019\\xa0-\\xa0January 2020\\xa0 (9 months)\\nGreater New York City Area\\nI founded untapt in October 2013; emerged from stealth in 2014 and went\\ninto production with first product in 2015. In May 2019, I handed over CEO\\nresponsibilities to Gareth Moody, previously the Chief Revenue Officer, shifting\\nmy focus to the technology and product.\\nOur core invention is an Artificial Neural Network that uses Deep Learning /\\nNLP to understand the fit between candidates and roles.\\nOur SaaS products are used in the Recruitment Industry to connect people\\nwith jobs in a highly scalable way. Our products are also used by Corporations\\nfor internal and external hiring at high volume. We have strong SaaS metrics\\nand trends, and a growing number of bellwether clients.\\nOur Deep Learning / NLP models are developed in Python using Google\\nTensorFlow. 
Our tech stack is React / Redux and Angular HTML5 front-end\\nwith Python / Flask back-end and MongoDB database. We are deployed on\\nthe Google Cloud Platform using Kubernetes container orchestration.\\nInterview at NASDAQ: https://www.pscp.tv/w/1mnxeoNrEvZGX\\nFounder, CEO\\nOctober 2013\\xa0-\\xa0May 2019\\xa0 (5 years 8 months)\\nGreater New York City Area\\nI founded untapt in October 2013; emerged from stealth in 2014 and went into\\nproduction with first product in 2015.\\nOur core invention is an Artificial Neural Network that uses Deep Learning /\\nNLP to understand the fit between candidates and roles.\\nOur SaaS products are used in the Recruitment Industry to connect people\\nwith jobs in a highly scalable way. Our products are also used by Corporations\\nfor internal and external hiring at high volume. We have strong SaaS metrics\\nand trends, and a growing number of bellwether clients.\\n\\xa0 Page 3 of 5\\xa0 \\xa0\\nOur Deep Learning / NLP models are developed in Python using Google\\nTensorFlow. Our tech stack is React / Redux and Angular HTML5 front-end\\nwith Python / Flask back-end and MongoDB database. We are deployed on\\nthe Google Cloud Platform using Kubernetes container orchestration.\\n-- Graduate of FinTech Innovation Lab\\n-- American Banker Top 20 Company To Watch\\n-- Voted AWS startup most likely to grow exponentially\\n-- Forbes contributor\\nMore at https://www.untapt.com\\nInterview at NASDAQ: https://www.pscp.tv/w/1mnxeoNrEvZGX\\nIn Fast Company: https://www.fastcompany.com/3067339/how-artificial-\\nintelligence-is-changing-the-way-companies-hire\\nJPMorgan Chase\\n11 years 6 months\\nManaging Director\\nMay 2011\\xa0-\\xa0March 2013\\xa0 (1 year 11 months)\\nHead of Technology for the Credit Portfolio Group and Hedge Fund Credit in\\nthe JPMorgan Investment Bank.\\nLed a team of 300 Java and Python software developers across NY, Houston,\\nLondon, Glasgow and India. Responsible for counterparty exposure, CVA\\nand risk management platforms, including simulation engines in Python that\\ncalculate counterparty credit risk for the firm's Derivatives portfolio.\\nManaged the electronic trading limits initiative, and the Credit Stress program\\nwhich calculates risk information under stressed conditions. 
Jointly responsible\\nfor Market Data and batch infrastructure across Risk.\\nExecutive Director\\nJanuary 2007\\xa0-\\xa0May 2011\\xa0 (4 years 5 months)\\nFrom Jan 2008:\\nChief Business Technologist for the Credit Portfolio Group and Hedge Fund\\nCredit in the JPMorgan Investment Bank, building Java and Python solutions\\nand managing a team of full stack developers.\\n2007:\\n\\xa0 Page 4 of 5\\xa0 \\xa0\\nResponsible for Credit Risk Limits Monitoring infrastructure for Derivatives and\\nCash Securities, developed in Java / Javascript / HTML.\\nVP\\nJuly 2004\\xa0-\\xa0December 2006\\xa0 (2 years 6 months)\\nManaged Collateral, Netting and Legal documentation technology across\\nDerivatives, Securities and Traditional Credit Products, including Java, Oracle,\\nSQL based platforms\\nVP\\nOctober 2001\\xa0-\\xa0June 2004\\xa0 (2 years 9 months)\\nFull stack developer, then manager for Java cross-product risk management\\nsystem in Credit Markets Technology\\nCygnifi\\nProject Leader\\nJanuary 2000\\xa0-\\xa0September 2001\\xa0 (1 year 9 months)\\nFull stack developer and engineering lead, developing Java and Javascript\\nplatform to risk manage Interest Rate Derivatives at this FInTech startup and\\nJPMorgan spin-off.\\nJPMorgan\\nAssociate\\nJuly 1997\\xa0-\\xa0December 1999\\xa0 (2 years 6 months)\\nFull stack developer for Exotic and Flow Interest Rate Derivatives risk\\nmanagement system in London, New York and Tokyo\\nIBM\\nSoftware Developer\\nAugust 1995\\xa0-\\xa0June 1997\\xa0 (1 year 11 months)\\nJava and Smalltalk developer with IBM Global Services; taught IBM classes on\\nSmalltalk and Object Technology in the UK and around Europe\\nEducation\\nUniversity of Oxford\\nPhysics\\xa0 \\xa0·\\xa0(1992\\xa0-\\xa01995)\\n\\xa0 Page 5 of 5\\n\\nWith this context, please chat with the user, always staying in character as Ed Donner.\"" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "system_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gr.ChatInterface(chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A lot is about to happen...\n", + "\n", + "1. Be able to ask an LLM to evaluate an answer\n", + "2. Be able to rerun if the answer fails evaluation\n", + "3. Put this together into 1 workflow\n", + "\n", + "All without any Agentic framework!" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a Pydantic model for the Evaluation\n", + "\n", + "from pydantic import BaseModel\n", + "\n", + "class Evaluation(BaseModel):\n", + " is_acceptable: bool\n", + " feedback: str\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "evaluator_system_prompt = f\"You are an evaluator that decides whether a response to a question is acceptable. \\\n", + "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \\\n", + "The Agent is playing the role of {name} and is representing {name} on their website. \\\n", + "The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n", + "The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:\"\n", + "\n", + "evaluator_system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n", + "evaluator_system_prompt += f\"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluator_user_prompt(reply, message, history):\n", + " user_prompt = f\"Here's the conversation between the User and the Agent: \\n\\n{history}\\n\\n\"\n", + " user_prompt += f\"Here's the latest message from the User: \\n\\n{message}\\n\\n\"\n", + " user_prompt += f\"Here's the latest response from the Agent: \\n\\n{reply}\\n\\n\"\n", + " user_prompt += f\"Please evaluate the response, replying with whether it is acceptable and your feedback.\"\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "gemini = OpenAI(\n", + " api_key=os.getenv(\"GOOGLE_API_KEY\"), \n", + " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate(reply, message, history) -> Evaluation:\n", + "\n", + " messages = [{\"role\": \"system\", \"content\": evaluator_system_prompt}] + [{\"role\": \"user\", \"content\": evaluator_user_prompt(reply, message, history)}]\n", + " response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages, response_format=Evaluation)\n", + " return response.choices[0].message.parsed" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "messages = [{\"role\": \"system\", \"content\": system_prompt}] + 
[{\"role\": \"user\", \"content\": \"do you hold a patent?\"}]\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "reply = response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"Yes, I hold a patent for an apparatus that determines role fitness while eliminating unwanted bias. This invention was developed during my time with untapt, where we focused on leveraging AI to enhance recruitment processes. If you'd like to know more about it or discuss its applications, feel free to ask!\"" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "reply" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Evaluation(is_acceptable=True, feedback='The response is acceptable. It directly answers the question, provides context, and invites further discussion, aligning with the persona and instructions.')" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluate(reply, \"do you hold a patent?\", messages[:1])" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "def rerun(reply, message, history, feedback):\n", + " updated_system_prompt = system_prompt + f\"\\n\\n## Previous answer rejected\\nYou just tried to reply, but the quality control rejected your reply\\n\"\n", + " updated_system_prompt += f\"## Your attempted answer:\\n{reply}\\n\\n\"\n", + " updated_system_prompt += f\"## Reason for rejection:\\n{feedback}\\n\\n\"\n", + " messages = [{\"role\": \"system\", \"content\": updated_system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " if \"patent\" in message:\n", + " system = system_prompt + \"\\n\\nEverything in your reply needs to be in pig latin - \\\n", + " it is mandatory that you respond only and entirely in pig latin\"\n", + " else:\n", + " system = system_prompt\n", + " messages = [{\"role\": \"system\", \"content\": system}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + " reply =response.choices[0].message.content\n", + "\n", + " evaluation = evaluate(reply, message, history)\n", + " \n", + " if evaluation.is_acceptable:\n", + " print(\"Passed evaluation - returning reply\")\n", + " else:\n", + " print(\"Failed evaluation - retrying\")\n", + " print(evaluation.feedback)\n", + " reply = rerun(reply, message, history, evaluation.feedback) \n", + " return reply" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7864\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Passed evaluation - returning reply\n", + "Failed evaluation - retrying\n", + "This response is not acceptable. The agent is answering in pig latin which is not professional.\n" + ] + } + ], + "source": [ + "gr.ChatInterface(chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/4_lab4.ipynb b/4_lab4.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e0adec5148488e16539e1f81fcb9f06d6d3973ce --- /dev/null +++ b/4_lab4.ipynb @@ -0,0 +1,437 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The first big project - Professionally You!\n", + "\n", + "### And, Tool use.\n", + "\n", + "### But first: introducing Pushover\n", + "\n", + "Pushover is a nifty tool for sending Push Notifications to your phone.\n", + "\n", + "It's super easy to set up and install!\n", + "\n", + "Simply visit https://pushover.net/ and sign up for a free account, and create your API key.\n", + "\n", + "Add to your `.env` file:\n", + "```\n", + "PUSHOVER_USER=\n", + "PUSHOVER_TOKEN=\n", + "```\n", + "And install the app on your phone." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import json\n", + "import os\n", + "import requests\n", + "from PyPDF2 import PdfReader\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# The usual start\n", + "\n", + "load_dotenv(override=True)\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# For pushover\n", + "\n", + "pushover_user = os.getenv(\"PUSHOVER_USER\")\n", + "pushover_token = os.getenv(\"PUSHOVER_TOKEN\")\n", + "pushover_url = \"https://api.pushover.net/1/messages.json\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def push(message):\n", + " print(f\"Push: {message}\")\n", + " payload = {\"user\": pushover_user, \"token\": pushover_token, \"message\": message}\n", + " requests.post(pushover_url, data=payload)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Push: HEY!!\n" + ] + } + ], + "source": [ + "push(\"HEY!!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def record_user_details(email, name=\"Name not provided\", notes=\"not provided\"):\n", + " push(f\"Recording interest from {name} with email {email} and notes {notes}\")\n", + " return {\"recorded\": \"ok\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def record_unknown_question(question):\n", + " push(f\"Recording {question} asked that I couldn't answer\")\n", + " return {\"recorded\": \"ok\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "record_user_details_json = {\n", + " \"name\": \"record_user_details\",\n", + " \"description\": \"Use this tool to record that a user is interested in being in touch and provided an email address\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"email\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The email address of this user\"\n", + " },\n", + " \"name\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The user's name, if they provided it\"\n", + " }\n", + " ,\n", + " \"notes\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"Any additional information about the conversation that's worth recording to give context\"\n", + " }\n", + " },\n", + " \"required\": [\"email\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "record_unknown_question_json = {\n", + " \"name\": \"record_unknown_question\",\n", + " \"description\": \"Always use this tool to record any question that couldn't be answered as you didn't know the answer\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"question\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The question that couldn't be answered\"\n", + " },\n", + " },\n", + " \"required\": [\"question\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + 
"cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "tools = [{\"type\": \"function\", \"function\": record_user_details_json},\n", + " {\"type\": \"function\", \"function\": record_unknown_question_json}]" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'type': 'function',\n", + " 'function': {'name': 'record_user_details',\n", + " 'description': 'Use this tool to record that a user is interested in being in touch and provided an email address',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'email': {'type': 'string',\n", + " 'description': 'The email address of this user'},\n", + " 'name': {'type': 'string',\n", + " 'description': \"The user's name, if they provided it\"},\n", + " 'notes': {'type': 'string',\n", + " 'description': \"Any additional information about the conversation that's worth recording to give context\"}},\n", + " 'required': ['email'],\n", + " 'additionalProperties': False}}},\n", + " {'type': 'function',\n", + " 'function': {'name': 'record_unknown_question',\n", + " 'description': \"Always use this tool to record any question that couldn't be answered as you didn't know the answer\",\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'question': {'type': 'string',\n", + " 'description': \"The question that couldn't be answered\"}},\n", + " 'required': ['question'],\n", + " 'additionalProperties': False}}}]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# This function can take a list of tool calls, and run them. 
This is the version with the big IF statement!\n",
+    "\n",
+    "def handle_tool_calls(tool_calls):\n",
+    "    results = []\n",
+    "    for tool_call in tool_calls:\n",
+    "        tool_name = tool_call.function.name\n",
+    "        arguments = json.loads(tool_call.function.arguments)\n",
+    "        print(f\"Tool called: {tool_name}\", flush=True)\n",
+    "\n",
+    "        # THE BIG IF STATEMENT!!!\n",
+    "\n",
+    "        if tool_name == \"record_user_details\":\n",
+    "            result = record_user_details(**arguments)\n",
+    "        elif tool_name == \"record_unknown_question\":\n",
+    "            result = record_unknown_question(**arguments)\n",
+    "        else:\n",
+    "            result = {}  # avoid a NameError if the model ever calls a tool we don't recognize\n",
+    "\n",
+    "        results.append({\"role\": \"tool\",\"content\": json.dumps(result),\"tool_call_id\": tool_call.id})\n",
+    "    return results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# globals() lets us look up any module-level function by its name as a string:\n",
+    "globals()[\"record_unknown_question\"](\"this is a really hard question\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This is a more elegant way that avoids the IF statement.\n",
+    "\n",
+    "def handle_tool_calls(tool_calls):\n",
+    "    results = []\n",
+    "    for tool_call in tool_calls:\n",
+    "        tool_name = tool_call.function.name\n",
+    "        arguments = json.loads(tool_call.function.arguments)\n",
+    "        print(f\"Tool called: {tool_name}\", flush=True)\n",
+    "        tool = globals().get(tool_name)  # .get() returns None for an unknown tool, where [tool_name] would raise a KeyError\n",
+    "        result = tool(**arguments) if tool else {}\n",
+    "        results.append({\"role\": \"tool\",\"content\": json.dumps(result),\"tool_call_id\": tool_call.id})\n",
+    "    return results"
+   ]
+  },
+  {
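+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Neither version of `handle_tool_calls` is easy to exercise without a live model call, but we can fake the object the SDK would hand us. A minimal smoke test: `SimpleNamespace` is a hand-built stand-in for the SDK's tool-call object, and the id and question are invented for the test (running it will send a real push notification):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from types import SimpleNamespace\n",
+    "\n",
+    "# A stand-in for the SDK's tool_call object - it only needs .id and .function (with .name and .arguments)\n",
+    "fake_call = SimpleNamespace(\n",
+    "    id=\"call_test_1\",\n",
+    "    function=SimpleNamespace(\n",
+    "        name=\"record_unknown_question\",\n",
+    "        arguments=json.dumps({\"question\": \"What is your favourite colour?\"}),\n",
+    "    ),\n",
+    ")\n",
+    "\n",
+    "# Expect a push notification, plus a returned 'tool' message containing {\"recorded\": \"ok\"}\n",
+    "handle_tool_calls([fake_call])"
+   ]
+  },
+  {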
\"\n", + "\n", + "system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n", + "system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " done = False\n", + " while not done:\n", + " response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages, tools=tools)\n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " results = handle_tool_calls(message.tool_calls)\n", + " messages.append(message)\n", + " messages.extend(results)\n", + " else:\n", + " done = True\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## And now for deployment\n", + "\n", + "This code is in `app.py`\n", + "\n", + "We will deploy to HuggingFace Spaces:\n", + "\n", + "1. Visit https://huggingface.co and set up an account \n", + "2. From the 1_foundations folder, enter: `gradio deploy` \n", + "3. Follow the instructions: name it \"career_conversation\", specify app.py, choose cpu-basic as the hardware, say Yes to needing to supply secrets, provide your openai api key, your pushover user and token, say \"yes\" to requirements.txt and list these packages:\n", + "requests\n", + "openai\n", + "pypdf2\n", + "gradio\n", + "Python-dotenv\n", + "And say \"no\" to github actions.\n", + "\n", + "And you're deployed!\n", + "\n", + "Here is mine: https://huggingface.co/spaces/ed-donner/Career_Conversation\n", + "\n", + "For more information on deployment:\n", + "\n", + "https://www.gradio.app/guides/sharing-your-app#hosting-on-hf-spaces\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Exercise

\n", + " • First and foremost, deploy this for yourself! It's a real, valuable tool - the future resume..
\n", + " • Next, improve the resources - add better context about yourself. If you know RAG, then add a knowledge base about you.
\n", + " • Add in more tools! You could have a SQL database with common Q&A that the LLM could read and write from?
\n", + " • Bring in the Evaluator from the last lab, and add other Agentic patterns.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Commercial implications

\n", + " Aside from the obvious (your career alter-ego) this has business applications in any situation where you need an AI assistant with domain expertise and an ability to interact with the real world.\n", + " \n", + "
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/app.py b/app.py index 2e4eac0f7a4db77361f093e58de8f4c0b95c4948..9af262322af020422d1b777265b1500e4b4246a0 100644 --- a/app.py +++ b/app.py @@ -88,13 +88,13 @@ class Me: self.summary = f.read() - def handle_tool_call(self, message): + def handle_tool_call(self, tool_calls): results = [] - for tool_call in message.tool_calls: + for tool_call in tool_calls: tool_name = tool_call.function.name arguments = json.loads(tool_call.function.arguments) print(f"Tool called: {tool_name}", flush=True) - tool = globals()[tool_name] + tool = globals().get(tool_name) result = tool(**arguments) if tool else {} results.append({"role": "tool","content": json.dumps(result),"tool_call_id": tool_call.id}) return results @@ -119,7 +119,8 @@ If the user is engaging in discussion, try to steer them towards getting in touc response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools) if response.choices[0].finish_reason=="tool_calls": message = response.choices[0].message - results = self.handle_tool_call(message) + tool_calls = message.tool_calls + results = self.handle_tool_call(tool_calls) messages.append(message) messages.extend(results) else: diff --git a/me/summary.txt b/me/summary.txt index e8691a2ce8d99c6038caac5e00cbd79da20eee3f..49d783ee98003676e11737a5f3b0b7e809a09fff 100644 --- a/me/summary.txt +++ b/me/summary.txt @@ -1 +1,2 @@ -My name is Ed Donner. I'm an entrepreneur, software engineer and data scientist. I'm originally from London, England, but I moved to NYC in 2000 and I've lived in NYC ever since. \ No newline at end of file +My name is Ed Donner. I'm an entrepreneur, software engineer and data scientist. I'm originally from London, England, but I moved to NYC in 2000. +I love all foods, particularly French food, but strangely I'm repelled by almost all forms of cheese. I'm not allergic, I just hate the taste! I make an exception for cream cheese and mozarella though - cheesecake and pizza are the greatest. \ No newline at end of file